From 4e3213f3bdc10b4b45a30f57ef037d8f5fa49822 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 17 Aug 2015 10:30:35 -0600 Subject: [PATCH] godep: vendor all dependencies & add circle-test.sh Vendor dependencies and use circle-test.sh to run CI process, because the CircleCI autobuild operations are not compatible with using godep. --- .gitignore | 1 - Godeps/Godeps.json | 210 + Godeps/Readme | 5 + Godeps/_workspace/.gitignore | 2 + .../github.com/fsouza/go-dockerclient.a | 1806 +++++++ .../external/github.com/Sirupsen/logrus.a | 794 ++++ .../external/github.com/docker/docker/opts.a | 400 ++ .../github.com/docker/docker/volume.a | Bin 0 -> 38914 bytes .../github.com/docker/libcontainer/user.a | Bin 0 -> 125556 bytes .../external/github.com/gorilla/context.a | 434 ++ .../external/github.com/gorilla/mux.a | 1035 ++++ .../opencontainers/runc/libcontainer/user.a | Bin 0 -> 126996 bytes .../fsouza/go-dockerclient/testing.a | 1236 +++++ .../src/github.com/Shopify/sarama/.gitignore | 24 + .../src/github.com/Shopify/sarama/.travis.yml | 41 + .../github.com/Shopify/sarama/CHANGELOG.md | 157 + .../github.com/Shopify/sarama/CONTRIBUTING.md | 31 + .../src/github.com/Shopify/sarama/MIT-LICENSE | 20 + .../src/github.com/Shopify/sarama/Makefile | 24 + .../src/github.com/Shopify/sarama/README.md | 31 + .../src/github.com/Shopify/sarama/Vagrantfile | 22 + .../Shopify/sarama/async_producer.go | 924 ++++ .../Shopify/sarama/async_producer_test.go | 743 +++ .../src/github.com/Shopify/sarama/broker.go | 385 ++ .../github.com/Shopify/sarama/broker_test.go | 177 + .../src/github.com/Shopify/sarama/client.go | 727 +++ .../github.com/Shopify/sarama/client_test.go | 608 +++ .../src/github.com/Shopify/sarama/config.go | 275 ++ .../github.com/Shopify/sarama/config_test.go | 10 + .../src/github.com/Shopify/sarama/consumer.go | 676 +++ .../sarama/consumer_metadata_request.go | 22 + .../sarama/consumer_metadata_request_test.go | 19 + .../sarama/consumer_metadata_response.go | 73 + 
.../sarama/consumer_metadata_response_test.go | 35 + .../Shopify/sarama/consumer_test.go | 844 ++++ .../github.com/Shopify/sarama/crc32_field.go | 35 + .../Shopify/sarama/encoder_decoder.go | 62 + .../src/github.com/Shopify/sarama/errors.go | 146 + .../Shopify/sarama/examples/README.md | 9 + .../sarama/examples/http_server/.gitignore | 2 + .../sarama/examples/http_server/README.md | 7 + .../examples/http_server/http_server.go | 246 + .../examples/http_server/http_server_test.go | 109 + .../Shopify/sarama/fetch_request.go | 123 + .../Shopify/sarama/fetch_request_test.go | 34 + .../Shopify/sarama/fetch_response.go | 173 + .../Shopify/sarama/fetch_response_test.go | 84 + .../Shopify/sarama/functional_client_test.go | 90 + .../sarama/functional_consumer_test.go | 61 + .../sarama/functional_producer_test.go | 203 + .../Shopify/sarama/functional_test.go | 146 + .../github.com/Shopify/sarama/length_field.go | 29 + .../src/github.com/Shopify/sarama/message.go | 154 + .../github.com/Shopify/sarama/message_set.go | 89 + .../github.com/Shopify/sarama/message_test.go | 113 + .../Shopify/sarama/metadata_request.go | 48 + .../Shopify/sarama/metadata_request_test.go | 29 + .../Shopify/sarama/metadata_response.go | 227 + .../Shopify/sarama/metadata_response_test.go | 139 + .../Shopify/sarama/mockbroker_test.go | 273 ++ .../Shopify/sarama/mockresponses_test.go | 411 ++ .../github.com/Shopify/sarama/mocks/README.md | 13 + .../Shopify/sarama/mocks/async_producer.go | 142 + .../sarama/mocks/async_producer_test.go | 94 + .../Shopify/sarama/mocks/consumer.go | 316 ++ .../Shopify/sarama/mocks/consumer_test.go | 249 + .../github.com/Shopify/sarama/mocks/mocks.go | 43 + .../Shopify/sarama/mocks/sync_producer.go | 93 + .../sarama/mocks/sync_producer_test.go | 98 + .../Shopify/sarama/offset_commit_request.go | 172 + .../sarama/offset_commit_request_test.go | 90 + .../Shopify/sarama/offset_commit_response.go | 73 + .../sarama/offset_commit_response_test.go | 24 + 
.../Shopify/sarama/offset_fetch_request.go | 71 + .../sarama/offset_fetch_request_test.go | 31 + .../Shopify/sarama/offset_fetch_response.go | 131 + .../sarama/offset_fetch_response_test.go | 22 + .../Shopify/sarama/offset_request.go | 113 + .../Shopify/sarama/offset_request_test.go | 26 + .../Shopify/sarama/offset_response.go | 130 + .../Shopify/sarama/offset_response_test.go | 62 + .../Shopify/sarama/packet_decoder.go | 44 + .../Shopify/sarama/packet_encoder.go | 41 + .../github.com/Shopify/sarama/partitioner.go | 120 + .../Shopify/sarama/partitioner_test.go | 198 + .../github.com/Shopify/sarama/prep_encoder.go | 95 + .../Shopify/sarama/produce_request.go | 148 + .../Shopify/sarama/produce_request_test.go | 47 + .../Shopify/sarama/produce_response.go | 112 + .../Shopify/sarama/produce_response_test.go | 67 + .../github.com/Shopify/sarama/real_decoder.go | 225 + .../github.com/Shopify/sarama/real_encoder.go | 100 + .../src/github.com/Shopify/sarama/request.go | 100 + .../github.com/Shopify/sarama/request_test.go | 80 + .../Shopify/sarama/response_header.go | 21 + .../Shopify/sarama/response_header_test.go | 21 + .../src/github.com/Shopify/sarama/sarama.go | 47 + .../src/github.com/Shopify/sarama/snappy.go | 41 + .../github.com/Shopify/sarama/snappy_test.go | 49 + .../Shopify/sarama/sync_producer.go | 94 + .../Shopify/sarama/sync_producer_test.go | 149 + .../github.com/Shopify/sarama/tools/README.md | 10 + .../tools/kafka-console-consumer/.gitignore | 2 + .../tools/kafka-console-consumer/README.md | 29 + .../kafka-console-consumer.go | 145 + .../.gitignore | 2 + .../kafka-console-partitionconsumer/README.md | 28 + .../kafka-console-partitionconsumer.go | 102 + .../tools/kafka-console-producer/.gitignore | 2 + .../tools/kafka-console-producer/README.md | 34 + .../kafka-console-producer.go | 118 + .../src/github.com/Shopify/sarama/utils.go | 89 + .../Shopify/sarama/vagrant/boot_cluster.sh | 22 + .../Shopify/sarama/vagrant/create_topics.sh | 8 + 
.../Shopify/sarama/vagrant/install_cluster.sh | 49 + .../Shopify/sarama/vagrant/kafka.conf | 5 + .../Shopify/sarama/vagrant/provision.sh | 15 + .../Shopify/sarama/vagrant/run_toxiproxy.sh | 22 + .../Shopify/sarama/vagrant/server.properties | 126 + .../Shopify/sarama/vagrant/setup_services.sh | 29 + .../Shopify/sarama/vagrant/toxiproxy.conf | 6 + .../Shopify/sarama/vagrant/zookeeper.conf | 4 + .../sarama/vagrant/zookeeper.properties | 36 + .../src/github.com/Sirupsen/logrus/.gitignore | 1 + .../github.com/Sirupsen/logrus/.travis.yml | 8 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 41 + .../src/github.com/Sirupsen/logrus/LICENSE | 21 + .../src/github.com/Sirupsen/logrus/README.md | 356 ++ .../src/github.com/Sirupsen/logrus/entry.go | 256 + .../github.com/Sirupsen/logrus/entry_test.go | 53 + .../Sirupsen/logrus/examples/basic/basic.go | 50 + .../Sirupsen/logrus/examples/hook/hook.go | 30 + .../github.com/Sirupsen/logrus/exported.go | 188 + .../github.com/Sirupsen/logrus/formatter.go | 48 + .../Sirupsen/logrus/formatter_bench_test.go | 98 + .../logrus/formatters/logstash/logstash.go | 56 + .../formatters/logstash/logstash_test.go | 52 + .../github.com/Sirupsen/logrus/hook_test.go | 122 + .../src/github.com/Sirupsen/logrus/hooks.go | 34 + .../logrus/hooks/airbrake/airbrake.go | 54 + .../logrus/hooks/airbrake/airbrake_test.go | 133 + .../Sirupsen/logrus/hooks/bugsnag/bugsnag.go | 68 + .../logrus/hooks/bugsnag/bugsnag_test.go | 64 + .../logrus/hooks/papertrail/README.md | 28 + .../logrus/hooks/papertrail/papertrail.go | 55 + .../hooks/papertrail/papertrail_test.go | 26 + .../Sirupsen/logrus/hooks/sentry/README.md | 111 + .../Sirupsen/logrus/hooks/sentry/sentry.go | 137 + .../logrus/hooks/sentry/sentry_test.go | 154 + .../Sirupsen/logrus/hooks/syslog/README.md | 20 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 59 + .../logrus/hooks/syslog/syslog_test.go | 26 + .../Sirupsen/logrus/json_formatter.go | 41 + .../Sirupsen/logrus/json_formatter_test.go | 120 + 
.../src/github.com/Sirupsen/logrus/logger.go | 206 + .../src/github.com/Sirupsen/logrus/logrus.go | 94 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 ++ .../Sirupsen/logrus/terminal_bsd.go | 9 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 159 + .../Sirupsen/logrus/text_formatter_test.go | 61 + .../src/github.com/Sirupsen/logrus/writer.go | 31 + .../github.com/armon/go-metrics/.gitignore | 22 + .../src/github.com/armon/go-metrics/LICENSE | 20 + .../src/github.com/armon/go-metrics/README.md | 71 + .../github.com/armon/go-metrics/const_unix.go | 12 + .../armon/go-metrics/const_windows.go | 13 + .../src/github.com/armon/go-metrics/inmem.go | 241 + .../armon/go-metrics/inmem_signal.go | 100 + .../armon/go-metrics/inmem_signal_test.go | 46 + .../github.com/armon/go-metrics/inmem_test.go | 104 + .../github.com/armon/go-metrics/metrics.go | 115 + .../armon/go-metrics/metrics_test.go | 262 + .../armon/go-metrics/prometheus/prometheus.go | 88 + .../src/github.com/armon/go-metrics/sink.go | 52 + .../github.com/armon/go-metrics/sink_test.go | 120 + .../src/github.com/armon/go-metrics/start.go | 95 + .../github.com/armon/go-metrics/start_test.go | 110 + .../src/github.com/armon/go-metrics/statsd.go | 154 + .../armon/go-metrics/statsd_test.go | 105 + .../github.com/armon/go-metrics/statsite.go | 142 + .../armon/go-metrics/statsite_test.go | 101 + .../src/github.com/boltdb/bolt/.gitignore | 4 + .../src/github.com/boltdb/bolt/LICENSE | 20 + .../src/github.com/boltdb/bolt/Makefile | 54 + .../src/github.com/boltdb/bolt/README.md | 621 +++ .../src/github.com/boltdb/bolt/batch.go | 138 + .../boltdb/bolt/batch_benchmark_test.go | 170 + .../boltdb/bolt/batch_example_test.go | 148 + .../src/github.com/boltdb/bolt/batch_test.go | 167 + .../src/github.com/boltdb/bolt/bolt_386.go | 7 + .../src/github.com/boltdb/bolt/bolt_amd64.go | 7 + 
.../src/github.com/boltdb/bolt/bolt_arm.go | 7 + .../src/github.com/boltdb/bolt/bolt_linux.go | 12 + .../github.com/boltdb/bolt/bolt_openbsd.go | 29 + .../src/github.com/boltdb/bolt/bolt_test.go | 36 + .../src/github.com/boltdb/bolt/bolt_unix.go | 100 + .../github.com/boltdb/bolt/bolt_windows.go | 76 + .../github.com/boltdb/bolt/boltsync_unix.go | 10 + .../src/github.com/boltdb/bolt/bucket.go | 743 +++ .../src/github.com/boltdb/bolt/bucket_test.go | 1169 +++++ .../github.com/boltdb/bolt/cmd/bolt/main.go | 1529 ++++++ .../boltdb/bolt/cmd/bolt/main_test.go | 145 + .../src/github.com/boltdb/bolt/cursor.go | 384 ++ .../src/github.com/boltdb/bolt/cursor_test.go | 511 ++ .../src/github.com/boltdb/bolt/db.go | 792 ++++ .../src/github.com/boltdb/bolt/db_test.go | 903 ++++ .../src/github.com/boltdb/bolt/doc.go | 44 + .../src/github.com/boltdb/bolt/errors.go | 70 + .../src/github.com/boltdb/bolt/freelist.go | 242 + .../github.com/boltdb/bolt/freelist_test.go | 156 + .../src/github.com/boltdb/bolt/node.go | 636 +++ .../src/github.com/boltdb/bolt/node_test.go | 156 + .../src/github.com/boltdb/bolt/page.go | 172 + .../src/github.com/boltdb/bolt/page_test.go | 72 + .../src/github.com/boltdb/bolt/quick_test.go | 79 + .../github.com/boltdb/bolt/simulation_test.go | 327 ++ .../src/github.com/boltdb/bolt/tx.go | 611 +++ .../src/github.com/boltdb/bolt/tx_test.go | 456 ++ .../github.com/cenkalti/backoff/.gitignore | 22 + .../github.com/cenkalti/backoff/.travis.yml | 2 + .../src/github.com/cenkalti/backoff/LICENSE | 20 + .../src/github.com/cenkalti/backoff/README.md | 116 + .../cenkalti/backoff/adv_example_test.go | 117 + .../github.com/cenkalti/backoff/backoff.go | 59 + .../cenkalti/backoff/backoff_test.go | 27 + .../cenkalti/backoff/example_test.go | 51 + .../cenkalti/backoff/exponential.go | 151 + .../cenkalti/backoff/exponential_test.go | 108 + .../src/github.com/cenkalti/backoff/retry.go | 46 + .../github.com/cenkalti/backoff/retry_test.go | 34 + 
.../src/github.com/cenkalti/backoff/ticker.go | 79 + .../cenkalti/backoff/ticker_test.go | 45 + .../dancannon/gorethink/encoding/cache.go | 258 + .../dancannon/gorethink/encoding/decoder.go | 141 + .../gorethink/encoding/decoder_test.go | 426 ++ .../gorethink/encoding/decoder_types.go | 524 ++ .../dancannon/gorethink/encoding/encoder.go | 89 + .../gorethink/encoding/encoder_test.go | 262 + .../gorethink/encoding/encoder_types.go | 303 ++ .../dancannon/gorethink/encoding/encoding.go | 32 + .../dancannon/gorethink/encoding/errors.go | 102 + .../dancannon/gorethink/encoding/fold.go | 139 + .../dancannon/gorethink/encoding/tags.go | 69 + .../dancannon/gorethink/encoding/utils.go | 72 + .../dancannon/gorethink/ql2/ql2.pb.go | 1243 +++++ .../dancannon/gorethink/ql2/ql2.proto | 805 ++++ .../dancannon/gorethink/types/geometry.go | 225 + .../eapache/go-resiliency/breaker/README.md | 34 + .../eapache/go-resiliency/breaker/breaker.go | 161 + .../go-resiliency/breaker/breaker_test.go | 196 + .../src/github.com/eapache/queue/.gitignore | 23 + .../src/github.com/eapache/queue/.travis.yml | 7 + .../src/github.com/eapache/queue/LICENSE | 21 + .../src/github.com/eapache/queue/README.md | 16 + .../src/github.com/eapache/queue/queue.go | 88 + .../github.com/eapache/queue/queue_test.go | 162 + .../fsouza/go-dockerclient/.travis.yml | 11 + .../github.com/fsouza/go-dockerclient/AUTHORS | 91 + .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 + .../github.com/fsouza/go-dockerclient/LICENSE | 22 + .../fsouza/go-dockerclient/Makefile | 47 + .../fsouza/go-dockerclient/README.markdown | 106 + .../github.com/fsouza/go-dockerclient/auth.go | 133 + .../fsouza/go-dockerclient/auth_test.go | 91 + .../fsouza/go-dockerclient/build_test.go | 144 + .../fsouza/go-dockerclient/change.go | 43 + .../fsouza/go-dockerclient/change_test.go | 26 + .../fsouza/go-dockerclient/client.go | 835 ++++ .../fsouza/go-dockerclient/client_test.go | 422 ++ .../fsouza/go-dockerclient/container.go | 1058 +++++ 
.../fsouza/go-dockerclient/container_test.go | 1941 ++++++++ .../github.com/fsouza/go-dockerclient/env.go | 168 + .../fsouza/go-dockerclient/env_test.go | 351 ++ .../fsouza/go-dockerclient/event.go | 305 ++ .../fsouza/go-dockerclient/event_test.go | 132 + .../fsouza/go-dockerclient/example_test.go | 168 + .../github.com/fsouza/go-dockerclient/exec.go | 185 + .../fsouza/go-dockerclient/exec_test.go | 262 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 26 + .../github.com/Sirupsen/logrus/LICENSE | 21 + .../github.com/Sirupsen/logrus/README.md | 355 ++ .../github.com/Sirupsen/logrus/entry.go | 254 + .../github.com/Sirupsen/logrus/entry_test.go | 53 + .../github.com/Sirupsen/logrus/exported.go | 188 + .../github.com/Sirupsen/logrus/formatter.go | 48 + .../Sirupsen/logrus/formatter_bench_test.go | 98 + .../github.com/Sirupsen/logrus/hook_test.go | 122 + .../github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/json_formatter.go | 41 + .../Sirupsen/logrus/json_formatter_test.go | 120 + .../github.com/Sirupsen/logrus/logger.go | 206 + .../github.com/Sirupsen/logrus/logrus.go | 94 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 ++ .../Sirupsen/logrus/terminal_bsd.go | 9 + .../Sirupsen/logrus/terminal_freebsd.go | 20 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_openbsd.go | 7 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 158 + .../Sirupsen/logrus/text_formatter_test.go | 61 + .../github.com/Sirupsen/logrus/writer.go | 31 + .../github.com/docker/docker/opts/envfile.go | 62 + .../docker/docker/opts/envfile_test.go | 133 + .../docker/docker/opts/hosts_unix.go | 7 + .../docker/docker/opts/hosts_windows.go | 7 + .../github.com/docker/docker/opts/ip.go | 35 + .../github.com/docker/docker/opts/ip_test.go | 54 + .../github.com/docker/docker/opts/opts.go | 323 ++ .../docker/docker/opts/opts_test.go | 479 ++ 
.../github.com/docker/docker/opts/ulimit.go | 47 + .../docker/docker/opts/ulimit_test.go | 42 + .../docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 902 ++++ .../docker/docker/pkg/archive/archive_test.go | 1204 +++++ .../docker/docker/pkg/archive/archive_unix.go | 89 + .../docker/pkg/archive/archive_unix_test.go | 60 + .../docker/pkg/archive/archive_windows.go | 50 + .../pkg/archive/archive_windows_test.go | 65 + .../docker/docker/pkg/archive/changes.go | 383 ++ .../docker/pkg/archive/changes_linux.go | 285 ++ .../docker/pkg/archive/changes_other.go | 97 + .../docker/pkg/archive/changes_posix_test.go | 127 + .../docker/docker/pkg/archive/changes_test.go | 495 ++ .../docker/docker/pkg/archive/changes_unix.go | 27 + .../docker/pkg/archive/changes_windows.go | 20 + .../docker/docker/pkg/archive/copy.go | 308 ++ .../docker/docker/pkg/archive/copy_test.go | 637 +++ .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 210 + .../docker/docker/pkg/archive/diff_test.go | 190 + .../docker/pkg/archive/example_changes.go | 97 + .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/utils_test.go | 166 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/archive/wrap_test.go | 98 + .../docker/docker/pkg/fileutils/fileutils.go | 196 + .../docker/pkg/fileutils/fileutils_test.go | 402 ++ .../docker/docker/pkg/homedir/homedir.go | 39 + .../docker/docker/pkg/homedir/homedir_test.go | 24 + .../docker/docker/pkg/ioutils/fmt.go | 14 + .../docker/docker/pkg/ioutils/fmt_test.go | 17 + .../docker/docker/pkg/ioutils/multireader.go | 226 + .../docker/pkg/ioutils/multireader_test.go | 149 + .../docker/docker/pkg/ioutils/readers.go | 254 + .../docker/docker/pkg/ioutils/readers_test.go | 216 + .../docker/docker/pkg/ioutils/scheduler.go | 6 + 
.../docker/pkg/ioutils/scheduler_gccgo.go | 13 + .../docker/docker/pkg/ioutils/writeflusher.go | 47 + .../docker/docker/pkg/ioutils/writers.go | 60 + .../docker/docker/pkg/ioutils/writers_test.go | 65 + .../docker/docker/pkg/mflag/LICENSE | 27 + .../docker/docker/pkg/mflag/README.md | 40 + .../docker/docker/pkg/mflag/flag.go | 1201 +++++ .../docker/docker/pkg/mflag/flag_test.go | 516 ++ .../docker/docker/pkg/parsers/parsers.go | 187 + .../docker/docker/pkg/parsers/parsers_test.go | 210 + .../docker/docker/pkg/pools/pools.go | 119 + .../docker/docker/pkg/pools/pools_test.go | 162 + .../docker/docker/pkg/promise/promise.go | 11 + .../docker/docker/pkg/stdcopy/stdcopy.go | 168 + .../docker/docker/pkg/stdcopy/stdcopy_test.go | 85 + .../docker/docker/pkg/system/errors.go | 9 + .../docker/pkg/system/events_windows.go | 83 + .../docker/docker/pkg/system/filesys.go | 11 + .../docker/pkg/system/filesys_windows.go | 64 + .../docker/docker/pkg/system/lstat.go | 19 + .../docker/docker/pkg/system/lstat_test.go | 28 + .../docker/docker/pkg/system/lstat_windows.go | 29 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 71 + .../docker/pkg/system/meminfo_linux_test.go | 38 + .../docker/pkg/system/meminfo_unsupported.go | 7 + .../docker/pkg/system/meminfo_windows.go | 44 + .../docker/docker/pkg/system/mknod.go | 20 + .../docker/docker/pkg/system/mknod_windows.go | 11 + .../docker/docker/pkg/system/stat.go | 46 + .../docker/docker/pkg/system/stat_freebsd.go | 27 + .../docker/docker/pkg/system/stat_linux.go | 33 + .../docker/docker/pkg/system/stat_test.go | 37 + .../docker/pkg/system/stat_unsupported.go | 17 + .../docker/docker/pkg/system/stat_windows.go | 36 + .../docker/docker/pkg/system/umask.go | 11 + .../docker/docker/pkg/system/umask_windows.go | 8 + .../docker/docker/pkg/system/utimes_darwin.go | 11 + .../docker/pkg/system/utimes_freebsd.go | 24 + .../docker/docker/pkg/system/utimes_linux.go | 28 + 
.../docker/docker/pkg/system/utimes_test.go | 66 + .../docker/pkg/system/utimes_unsupported.go | 13 + .../docker/docker/pkg/system/xattrs_linux.go | 59 + .../docker/pkg/system/xattrs_unsupported.go | 11 + .../docker/docker/pkg/ulimit/ulimit.go | 111 + .../docker/docker/pkg/ulimit/ulimit_test.go | 55 + .../docker/docker/pkg/units/duration.go | 33 + .../docker/docker/pkg/units/duration_test.go | 46 + .../docker/docker/pkg/units/size.go | 95 + .../docker/docker/pkg/units/size_test.go | 108 + .../github.com/docker/docker/volume/volume.go | 61 + .../docker/libcontainer/user/MAINTAINERS | 2 + .../docker/libcontainer/user/lookup.go | 108 + .../docker/libcontainer/user/lookup_unix.go | 30 + .../libcontainer/user/lookup_unsupported.go | 21 + .../docker/libcontainer/user/user.go | 407 ++ .../docker/libcontainer/user/user_test.go | 443 ++ .../github.com/gorilla/context/LICENSE | 27 + .../github.com/gorilla/context/README.md | 7 + .../github.com/gorilla/context/context.go | 143 + .../gorilla/context/context_test.go | 161 + .../github.com/gorilla/context/doc.go | 82 + .../external/github.com/gorilla/mux/LICENSE | 27 + .../external/github.com/gorilla/mux/README.md | 235 + .../github.com/gorilla/mux/bench_test.go | 21 + .../external/github.com/gorilla/mux/doc.go | 206 + .../external/github.com/gorilla/mux/mux.go | 469 ++ .../github.com/gorilla/mux/mux_test.go | 1334 ++++++ .../github.com/gorilla/mux/old_test.go | 714 +++ .../external/github.com/gorilla/mux/regexp.go | 317 ++ .../external/github.com/gorilla/mux/route.go | 603 +++ .../runc/libcontainer/user/MAINTAINERS | 2 + .../runc/libcontainer/user/lookup.go | 108 + .../runc/libcontainer/user/lookup_unix.go | 30 + .../libcontainer/user/lookup_unsupported.go | 21 + .../runc/libcontainer/user/user.go | 413 ++ .../runc/libcontainer/user/user_test.go | 436 ++ .../fsouza/go-dockerclient/image.go | 537 +++ .../fsouza/go-dockerclient/image_test.go | 967 ++++ .../github.com/fsouza/go-dockerclient/misc.go | 59 + 
.../fsouza/go-dockerclient/misc_test.go | 159 + .../fsouza/go-dockerclient/network.go | 127 + .../fsouza/go-dockerclient/network_test.go | 96 + .../fsouza/go-dockerclient/signal.go | 49 + .../github.com/fsouza/go-dockerclient/tar.go | 117 + .../testing/data/.dockerignore | 3 + .../go-dockerclient/testing/data/Dockerfile | 15 + .../go-dockerclient/testing/data/barfile | 0 .../go-dockerclient/testing/data/ca.pem | 18 + .../go-dockerclient/testing/data/cert.pem | 18 + .../testing/data/container.tar | Bin 0 -> 2048 bytes .../testing/data/dockerfile.tar | Bin 0 -> 2560 bytes .../go-dockerclient/testing/data/foofile | 0 .../go-dockerclient/testing/data/key.pem | 27 + .../go-dockerclient/testing/data/server.pem | 18 + .../testing/data/serverkey.pem | 27 + .../go-dockerclient/testing/data/symlink | 1 + .../fsouza/go-dockerclient/testing/server.go | 1062 +++++ .../go-dockerclient/testing/server_test.go | 1784 +++++++ .../github.com/fsouza/go-dockerclient/tls.go | 100 + .../github.com/go-sql-driver/mysql/.gitignore | 8 + .../go-sql-driver/mysql/.travis.yml | 10 + .../github.com/go-sql-driver/mysql/AUTHORS | 44 + .../go-sql-driver/mysql/CHANGELOG.md | 92 + .../go-sql-driver/mysql/CONTRIBUTING.md | 40 + .../github.com/go-sql-driver/mysql/LICENSE | 373 ++ .../github.com/go-sql-driver/mysql/README.md | 386 ++ .../go-sql-driver/mysql/appengine.go | 19 + .../go-sql-driver/mysql/benchmark_test.go | 246 + .../github.com/go-sql-driver/mysql/buffer.go | 136 + .../go-sql-driver/mysql/collations.go | 250 + .../go-sql-driver/mysql/connection.go | 403 ++ .../github.com/go-sql-driver/mysql/const.go | 162 + .../github.com/go-sql-driver/mysql/driver.go | 149 + .../go-sql-driver/mysql/driver_test.go | 1681 +++++++ .../github.com/go-sql-driver/mysql/errors.go | 131 + .../go-sql-driver/mysql/errors_test.go | 42 + .../github.com/go-sql-driver/mysql/infile.go | 164 + .../github.com/go-sql-driver/mysql/packets.go | 1179 +++++ .../github.com/go-sql-driver/mysql/result.go | 22 + 
.../github.com/go-sql-driver/mysql/rows.go | 106 + .../go-sql-driver/mysql/statement.go | 150 + .../go-sql-driver/mysql/transaction.go | 31 + .../github.com/go-sql-driver/mysql/utils.go | 973 ++++ .../go-sql-driver/mysql/utils_test.go | 346 ++ .../github.com/gogo/protobuf/proto/Makefile | 43 + .../gogo/protobuf/proto/all_test.go | 2104 ++++++++ .../github.com/gogo/protobuf/proto/clone.go | 217 + .../gogo/protobuf/proto/clone_test.go | 245 + .../github.com/gogo/protobuf/proto/decode.go | 832 ++++ .../gogo/protobuf/proto/decode_gogo.go | 175 + .../github.com/gogo/protobuf/proto/encode.go | 1293 +++++ .../gogo/protobuf/proto/encode_gogo.go | 354 ++ .../github.com/gogo/protobuf/proto/equal.go | 256 + .../gogo/protobuf/proto/equal_test.go | 191 + .../gogo/protobuf/proto/extensions.go | 519 ++ .../gogo/protobuf/proto/extensions_gogo.go | 221 + .../gogo/protobuf/proto/extensions_test.go | 292 ++ .../src/github.com/gogo/protobuf/proto/lib.go | 841 ++++ .../gogo/protobuf/proto/lib_gogo.go | 40 + .../gogo/protobuf/proto/message_set.go | 287 ++ .../gogo/protobuf/proto/message_set_test.go | 66 + .../gogo/protobuf/proto/pointer_reflect.go | 479 ++ .../gogo/protobuf/proto/pointer_unsafe.go | 266 ++ .../protobuf/proto/pointer_unsafe_gogo.go | 108 + .../gogo/protobuf/proto/properties.go | 815 ++++ .../gogo/protobuf/proto/properties_gogo.go | 64 + .../protobuf/proto/proto3_proto/proto3.pb.go | 122 + .../protobuf/proto/proto3_proto/proto3.proto | 68 + .../gogo/protobuf/proto/proto3_test.go | 125 + .../gogo/protobuf/proto/size2_test.go | 63 + .../gogo/protobuf/proto/size_test.go | 142 + .../gogo/protobuf/proto/skip_gogo.go | 117 + .../gogo/protobuf/proto/testdata/Makefile | 37 + .../protobuf/proto/testdata/golden_test.go | 86 + .../gogo/protobuf/proto/testdata/test.pb.go | 2746 +++++++++++ .../protobuf/proto/testdata/test.pb.go.golden | 1737 +++++++ .../gogo/protobuf/proto/testdata/test.proto | 480 ++ .../github.com/gogo/protobuf/proto/text.go | 804 ++++ 
.../gogo/protobuf/proto/text_gogo.go | 55 + .../gogo/protobuf/proto/text_parser.go | 815 ++++ .../gogo/protobuf/proto/text_parser_test.go | 511 ++ .../gogo/protobuf/proto/text_test.go | 450 ++ .../github.com/golang/protobuf/proto/Makefile | 43 + .../golang/protobuf/proto/all_test.go | 2104 ++++++++ .../github.com/golang/protobuf/proto/clone.go | 212 + .../golang/protobuf/proto/clone_test.go | 245 + .../golang/protobuf/proto/decode.go | 827 ++++ .../golang/protobuf/proto/encode.go | 1293 +++++ .../github.com/golang/protobuf/proto/equal.go | 256 + .../golang/protobuf/proto/equal_test.go | 191 + .../golang/protobuf/proto/extensions.go | 400 ++ .../golang/protobuf/proto/extensions_test.go | 292 ++ .../github.com/golang/protobuf/proto/lib.go | 841 ++++ .../golang/protobuf/proto/message_set.go | 287 ++ .../golang/protobuf/proto/message_set_test.go | 66 + .../golang/protobuf/proto/pointer_reflect.go | 479 ++ .../golang/protobuf/proto/pointer_unsafe.go | 266 ++ .../golang/protobuf/proto/properties.go | 742 +++ .../protobuf/proto/proto3_proto/proto3.pb.go | 122 + .../protobuf/proto/proto3_proto/proto3.proto | 68 + .../golang/protobuf/proto/proto3_test.go | 125 + .../golang/protobuf/proto/size2_test.go | 63 + .../golang/protobuf/proto/size_test.go | 142 + .../golang/protobuf/proto/testdata/Makefile | 50 + .../protobuf/proto/testdata/golden_test.go | 86 + .../golang/protobuf/proto/testdata/test.pb.go | 2746 +++++++++++ .../golang/protobuf/proto/testdata/test.proto | 480 ++ .../github.com/golang/protobuf/proto/text.go | 769 +++ .../golang/protobuf/proto/text_parser.go | 772 +++ .../golang/protobuf/proto/text_parser_test.go | 511 ++ .../golang/protobuf/proto/text_test.go | 450 ++ .../src/github.com/golang/snappy/AUTHORS | 14 + .../src/github.com/golang/snappy/CONTRIBUTORS | 36 + .../src/github.com/golang/snappy/LICENSE | 27 + .../src/github.com/golang/snappy/README | 7 + .../src/github.com/golang/snappy/decode.go | 294 ++ .../src/github.com/golang/snappy/encode.go | 254 + 
.../src/github.com/golang/snappy/snappy.go | 68 + .../github.com/golang/snappy/snappy_test.go | 377 ++ .../github.com/gonuts/go-shellquote/README | 36 + .../gonuts/go-shellquote/both_test.go | 29 + .../github.com/gonuts/go-shellquote/doc.go | 3 + .../github.com/gonuts/go-shellquote/quote.go | 102 + .../gonuts/go-shellquote/quote_test.go | 28 + .../gonuts/go-shellquote/unquote.go | 144 + .../gonuts/go-shellquote/unquote_test.go | 52 + .../hashicorp/go-msgpack/codec/0doc.go | 143 + .../hashicorp/go-msgpack/codec/README.md | 174 + .../hashicorp/go-msgpack/codec/bench_test.go | 319 ++ .../hashicorp/go-msgpack/codec/binc.go | 786 +++ .../hashicorp/go-msgpack/codec/codecs_test.go | 1002 ++++ .../hashicorp/go-msgpack/codec/decode.go | 1048 ++++ .../hashicorp/go-msgpack/codec/encode.go | 1001 ++++ .../go-msgpack/codec/ext_dep_test.go | 75 + .../hashicorp/go-msgpack/codec/helper.go | 589 +++ .../go-msgpack/codec/helper_internal.go | 127 + .../hashicorp/go-msgpack/codec/msgpack.go | 816 ++++ .../go-msgpack/codec/msgpack_test.py | 110 + .../hashicorp/go-msgpack/codec/rpc.go | 152 + .../hashicorp/go-msgpack/codec/simple.go | 461 ++ .../hashicorp/go-msgpack/codec/time.go | 193 + .../go-msgpack/codec/z_helper_test.go | 103 + .../github.com/hashicorp/raft-boltdb/LICENSE | 362 ++ .../hashicorp/raft-boltdb/README.md | 11 + .../hashicorp/raft-boltdb/bench_test.go | 88 + .../hashicorp/raft-boltdb/bolt_store.go | 231 + .../hashicorp/raft-boltdb/bolt_store_test.go | 332 ++ .../github.com/hashicorp/raft-boltdb/util.go | 37 + .../src/github.com/hashicorp/raft/.gitignore | 23 + .../src/github.com/hashicorp/raft/.travis.yml | 14 + .../src/github.com/hashicorp/raft/LICENSE | 354 ++ .../src/github.com/hashicorp/raft/Makefile | 17 + .../src/github.com/hashicorp/raft/README.md | 89 + .../github.com/hashicorp/raft/bench/bench.go | 171 + .../src/github.com/hashicorp/raft/commands.go | 80 + .../src/github.com/hashicorp/raft/config.go | 125 + .../hashicorp/raft/discard_snapshot.go | 48 + 
.../hashicorp/raft/discard_snapshot_test.go | 17 + .../hashicorp/raft/file_snapshot.go | 460 ++ .../hashicorp/raft/file_snapshot_test.go | 343 ++ .../src/github.com/hashicorp/raft/fsm.go | 37 + .../src/github.com/hashicorp/raft/future.go | 182 + .../src/github.com/hashicorp/raft/inflight.go | 213 + .../hashicorp/raft/inflight_test.go | 150 + .../github.com/hashicorp/raft/inmem_store.go | 116 + .../hashicorp/raft/inmem_transport.go | 315 ++ .../hashicorp/raft/inmem_transport_test.go | 12 + .../github.com/hashicorp/raft/integ_test.go | 266 ++ .../src/github.com/hashicorp/raft/log.go | 60 + .../github.com/hashicorp/raft/log_cache.go | 79 + .../hashicorp/raft/log_cache_test.go | 88 + .../hashicorp/raft/net_transport.go | 606 +++ .../hashicorp/raft/net_transport_test.go | 449 ++ .../src/github.com/hashicorp/raft/peer.go | 122 + .../github.com/hashicorp/raft/peer_test.go | 44 + .../src/github.com/hashicorp/raft/raft.go | 1781 +++++++ .../github.com/hashicorp/raft/raft_test.go | 1454 ++++++ .../github.com/hashicorp/raft/replication.go | 513 ++ .../src/github.com/hashicorp/raft/snapshot.go | 40 + .../src/github.com/hashicorp/raft/stable.go | 15 + .../src/github.com/hashicorp/raft/state.go | 169 + .../hashicorp/raft/tcp_transport.go | 80 + .../hashicorp/raft/tcp_transport_test.go | 24 + .../github.com/hashicorp/raft/transport.go | 85 + .../src/github.com/hashicorp/raft/util.go | 200 + .../github.com/hashicorp/raft/util_test.go | 187 + .../influxdb/influxdb/client/README.md | 207 + .../influxdb/influxdb/client/example_test.go | 113 + .../influxdb/influxdb/client/influxdb.go | 656 +++ .../influxdb/influxdb/client/influxdb_test.go | 530 +++ .../influxdb/influxdb/influxql/INFLUXQL.md | 650 +++ .../influxdb/influxdb/influxql/NOTES | 682 +++ .../influxdb/influxdb/influxql/ast.go | 3069 ++++++++++++ .../influxdb/influxdb/influxql/ast_test.go | 759 +++ .../influxdb/influxdb/influxql/doc.go | 64 + .../influxdb/influxdb/influxql/functions.go | 1114 +++++ 
.../influxdb/influxql/functions_test.go | 534 +++ .../influxdb/influxdb/influxql/parser.go | 2274 +++++++++ .../influxdb/influxdb/influxql/parser_test.go | 1809 +++++++ .../influxdb/influxdb/influxql/result.go | 231 + .../influxdb/influxdb/influxql/scanner.go | 563 +++ .../influxdb/influxql/scanner_test.go | 287 ++ .../influxdb/influxdb/influxql/token.go | 296 ++ .../influxdb/influxdb/meta/config.go | 53 + .../influxdb/influxdb/meta/config_test.go | 36 + .../github.com/influxdb/influxdb/meta/data.go | 1063 +++++ .../influxdb/influxdb/meta/data_test.go | 684 +++ .../influxdb/influxdb/meta/errors.go | 115 + .../influxdb/meta/internal/meta.pb.go | 1421 ++++++ .../influxdb/meta/internal/meta.proto | 320 ++ .../influxdb/influxdb/meta/proxy.go | 62 + .../github.com/influxdb/influxdb/meta/rpc.go | 460 ++ .../influxdb/influxdb/meta/rpc_test.go | 242 + .../influxdb/influxdb/meta/state.go | 489 ++ .../influxdb/meta/statement_executor.go | 286 ++ .../influxdb/meta/statement_executor_test.go | 893 ++++ .../influxdb/influxdb/meta/store.go | 2079 ++++++++ .../influxdb/influxdb/meta/store_test.go | 1132 +++++ .../influxdb/influxdb/snapshot/snapshot.go | 529 +++ .../influxdb/snapshot/snapshot_test.go | 293 ++ .../github.com/influxdb/influxdb/toml/toml.go | 72 + .../influxdb/influxdb/toml/toml_test.go | 47 + .../influxdb/influxdb/tsdb/README.md | 85 + .../influxdb/influxdb/tsdb/batcher.go | 142 + .../influxdb/influxdb/tsdb/batcher_test.go | 122 + .../influxdb/influxdb/tsdb/config.go | 34 + .../influxdb/influxdb/tsdb/cursor.go | 119 + .../influxdb/influxdb/tsdb/cursor_test.go | 221 + .../github.com/influxdb/influxdb/tsdb/doc.go | 5 + .../influxdb/influxdb/tsdb/engine.go | 127 + .../influxdb/influxdb/tsdb/engine/b1/b1.go | 695 +++ .../influxdb/tsdb/engine/b1/b1_test.go | 134 + .../influxdb/influxdb/tsdb/engine/bz1/bz1.go | 627 +++ .../influxdb/tsdb/engine/bz1/bz1_test.go | 439 ++ .../influxdb/influxdb/tsdb/engine/engine.go | 6 + .../influxdb/influxdb/tsdb/engine_test.go | 3 + 
.../influxdb/influxdb/tsdb/executor.go | 981 ++++ .../influxdb/influxdb/tsdb/executor_test.go | 991 ++++ .../influxdb/tsdb/internal/meta.pb.go | 123 + .../influxdb/tsdb/internal/meta.proto | 27 + .../influxdb/influxdb/tsdb/mapper.go | 883 ++++ .../influxdb/influxdb/tsdb/mapper_test.go | 553 +++ .../github.com/influxdb/influxdb/tsdb/meta.go | 1309 +++++ .../influxdb/influxdb/tsdb/meta_test.go | 305 ++ .../influxdb/influxdb/tsdb/monitor.go | 83 + .../influxdb/influxdb/tsdb/points.go | 1226 +++++ .../influxdb/influxdb/tsdb/points_test.go | 1218 +++++ .../influxdb/influxdb/tsdb/query_executor.go | 934 ++++ .../influxdb/tsdb/query_executor_test.go | 482 ++ .../influxdb/influxdb/tsdb/shard.go | 707 +++ .../influxdb/influxdb/tsdb/shard_test.go | 340 ++ .../influxdb/influxdb/tsdb/snapshot_writer.go | 124 + .../influxdb/influxdb/tsdb/store.go | 333 ++ .../influxdb/influxdb/tsdb/store_test.go | 282 ++ .../src/github.com/lib/pq/.gitignore | 4 + .../src/github.com/lib/pq/.travis.yml | 68 + .../src/github.com/lib/pq/CONTRIBUTING.md | 29 + .../src/github.com/lib/pq/LICENSE.md | 8 + .../src/github.com/lib/pq/README.md | 103 + .../src/github.com/lib/pq/bench_test.go | 435 ++ .../_workspace/src/github.com/lib/pq/buf.go | 91 + .../src/github.com/lib/pq/certs/README | 3 + .../github.com/lib/pq/certs/postgresql.crt | 69 + .../github.com/lib/pq/certs/postgresql.key | 15 + .../src/github.com/lib/pq/certs/root.crt | 24 + .../src/github.com/lib/pq/certs/server.crt | 81 + .../src/github.com/lib/pq/certs/server.key | 27 + .../_workspace/src/github.com/lib/pq/conn.go | 1775 +++++++ .../src/github.com/lib/pq/conn_test.go | 1306 +++++ .../_workspace/src/github.com/lib/pq/copy.go | 268 ++ .../src/github.com/lib/pq/copy_test.go | 462 ++ .../_workspace/src/github.com/lib/pq/doc.go | 210 + .../src/github.com/lib/pq/encode.go | 538 +++ .../src/github.com/lib/pq/encode_test.go | 719 +++ .../_workspace/src/github.com/lib/pq/error.go | 508 ++ .../src/github.com/lib/pq/hstore/hstore.go | 118 + 
.../github.com/lib/pq/hstore/hstore_test.go | 148 + .../github.com/lib/pq/listen_example/doc.go | 102 + .../src/github.com/lib/pq/notify.go | 766 +++ .../src/github.com/lib/pq/notify_test.go | 574 +++ .../src/github.com/lib/pq/oid/doc.go | 6 + .../src/github.com/lib/pq/oid/gen.go | 74 + .../src/github.com/lib/pq/oid/types.go | 161 + .../src/github.com/lib/pq/ssl_test.go | 226 + .../_workspace/src/github.com/lib/pq/url.go | 76 + .../src/github.com/lib/pq/url_test.go | 54 + .../src/github.com/lib/pq/user_posix.go | 24 + .../src/github.com/lib/pq/user_windows.go | 27 + .../pbutil/all_test.go | 320 ++ .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../pbutil/fixtures_test.go | 103 + .../naoina/go-stringutil/.travis.yml | 9 + .../github.com/naoina/go-stringutil/LICENSE | 19 + .../github.com/naoina/go-stringutil/README.md | 13 + .../naoina/go-stringutil/strings.go | 120 + .../go-stringutil/strings_bench_test.go | 35 + .../naoina/go-stringutil/strings_test.go | 88 + .../src/github.com/naoina/toml/.travis.yml | 11 + .../src/github.com/naoina/toml/LICENSE | 19 + .../src/github.com/naoina/toml/Makefile | 16 + .../src/github.com/naoina/toml/README.md | 364 ++ .../src/github.com/naoina/toml/ast/ast.go | 184 + .../src/github.com/naoina/toml/decode.go | 678 +++ .../naoina/toml/decode_bench_test.go | 49 + .../src/github.com/naoina/toml/decode_test.go | 1123 +++++ .../src/github.com/naoina/toml/encode.go | 235 + .../src/github.com/naoina/toml/encode_test.go | 298 ++ .../src/github.com/naoina/toml/error.go | 31 + .../src/github.com/naoina/toml/parse.go | 54 + .../src/github.com/naoina/toml/parse.peg | 138 + .../src/github.com/naoina/toml/parse.peg.go | 3065 ++++++++++++ .../github.com/naoina/toml/testdata/test.toml | 244 + .../src/github.com/naoina/toml/util.go | 79 + .../client_golang/extraction/discriminator.go | 74 + .../extraction/discriminator_test.go | 126 + .../client_golang/extraction/extraction.go | 15 + 
.../extraction/fixtures/empty.json | 0 .../fixtures/test0_0_1-0_0_2-large.json | 1032 ++++ .../extraction/fixtures/test0_0_1-0_0_2.json | 79 + .../extraction/metricfamilyprocessor.go | 318 ++ .../extraction/metricfamilyprocessor_test.go | 153 + .../client_golang/extraction/processor.go | 84 + .../extraction/processor0_0_1.go | 127 + .../extraction/processor0_0_1_test.go | 185 + .../extraction/processor0_0_2.go | 106 + .../extraction/processor0_0_2_test.go | 225 + .../client_golang/extraction/textprocessor.go | 40 + .../extraction/textprocessor_test.go | 100 + .../client_golang/model/fingerprinting.go | 110 + .../client_golang/model/labelname.go | 133 + .../client_golang/model/labelname_test.go | 55 + .../client_golang/model/labelset.go | 83 + .../client_golang/model/labelvalue.go | 36 + .../client_golang/model/labelvalue_test.go | 55 + .../prometheus/client_golang/model/metric.go | 192 + .../client_golang/model/metric_test.go | 130 + .../prometheus/client_golang/model/model.go | 15 + .../prometheus/client_golang/model/sample.go | 79 + .../client_golang/model/sample_test.go | 114 + .../client_golang/model/samplevalue.go | 37 + .../client_golang/model/signature.go | 190 + .../client_golang/model/signature_test.go | 305 ++ .../client_golang/model/timestamp.go | 116 + .../client_golang/model/timestamp_test.go | 86 + .../client_golang/text/bench_test.go | 169 + .../prometheus/client_golang/text/create.go | 315 ++ .../client_golang/text/create_test.go | 439 ++ .../prometheus/client_golang/text/fuzz.go | 36 + .../text/fuzz/corpus/from_test_parse_0 | 2 + .../text/fuzz/corpus/from_test_parse_1 | 6 + .../text/fuzz/corpus/from_test_parse_2 | 12 + .../text/fuzz/corpus/from_test_parse_3 | 22 + .../text/fuzz/corpus/from_test_parse_4 | 10 + .../text/fuzz/corpus/from_test_parse_error_0 | 1 + .../text/fuzz/corpus/from_test_parse_error_1 | 1 + .../text/fuzz/corpus/from_test_parse_error_10 | 1 + .../text/fuzz/corpus/from_test_parse_error_11 | 1 + 
.../text/fuzz/corpus/from_test_parse_error_12 | 3 + .../text/fuzz/corpus/from_test_parse_error_13 | 3 + .../text/fuzz/corpus/from_test_parse_error_14 | 3 + .../text/fuzz/corpus/from_test_parse_error_15 | 2 + .../text/fuzz/corpus/from_test_parse_error_16 | 2 + .../text/fuzz/corpus/from_test_parse_error_17 | 1 + .../text/fuzz/corpus/from_test_parse_error_18 | 1 + .../text/fuzz/corpus/from_test_parse_error_19 | 3 + .../text/fuzz/corpus/from_test_parse_error_2 | 3 + .../text/fuzz/corpus/from_test_parse_error_3 | 1 + .../text/fuzz/corpus/from_test_parse_error_4 | 1 + .../text/fuzz/corpus/from_test_parse_error_5 | 1 + .../text/fuzz/corpus/from_test_parse_error_6 | 1 + .../text/fuzz/corpus/from_test_parse_error_7 | 3 + .../text/fuzz/corpus/from_test_parse_error_8 | 1 + .../text/fuzz/corpus/from_test_parse_error_9 | 1 + .../client_golang/text/fuzz/corpus/minimal | 1 + .../prometheus/client_golang/text/parse.go | 746 +++ .../client_golang/text/parse_test.go | 588 +++ .../prometheus/client_golang/text/proto.go | 43 + .../client_golang/text/testdata/protobuf | Bin 0 -> 8243 bytes .../client_golang/text/testdata/protobuf.gz | Bin 0 -> 2053 bytes .../client_golang/text/testdata/text | 322 ++ .../client_golang/text/testdata/text.gz | Bin 0 -> 2595 bytes .../prometheus/client_model/go/metrics.pb.go | 364 ++ .../samuel/go-zookeeper/zk/cluster_test.go | 166 + .../github.com/samuel/go-zookeeper/zk/conn.go | 871 ++++ .../samuel/go-zookeeper/zk/constants.go | 240 + .../samuel/go-zookeeper/zk/constants_test.go | 24 + .../github.com/samuel/go-zookeeper/zk/flw.go | 288 ++ .../samuel/go-zookeeper/zk/flw_test.go | 367 ++ .../github.com/samuel/go-zookeeper/zk/lock.go | 142 + .../samuel/go-zookeeper/zk/lock_test.go | 94 + .../samuel/go-zookeeper/zk/server_help.go | 119 + .../samuel/go-zookeeper/zk/server_java.go | 136 + .../samuel/go-zookeeper/zk/structs.go | 640 +++ .../samuel/go-zookeeper/zk/structs_test.go | 71 + .../samuel/go-zookeeper/zk/tracer.go | 148 + 
.../github.com/samuel/go-zookeeper/zk/util.go | 54 + .../samuel/go-zookeeper/zk/util_test.go | 17 + .../samuel/go-zookeeper/zk/zk_test.go | 518 ++ .../src/github.com/stretchr/objx/.gitignore | 22 + .../src/github.com/stretchr/objx/LICENSE.md | 23 + .../src/github.com/stretchr/objx/README.md | 3 + .../src/github.com/stretchr/objx/accessors.go | 179 + .../stretchr/objx/accessors_test.go | 145 + .../stretchr/objx/codegen/array-access.txt | 14 + .../stretchr/objx/codegen/index.html | 86 + .../stretchr/objx/codegen/template.txt | 286 ++ .../stretchr/objx/codegen/types_list.txt | 20 + .../src/github.com/stretchr/objx/constants.go | 13 + .../github.com/stretchr/objx/conversions.go | 117 + .../stretchr/objx/conversions_test.go | 94 + .../src/github.com/stretchr/objx/doc.go | 72 + .../github.com/stretchr/objx/fixture_test.go | 98 + .../src/github.com/stretchr/objx/map.go | 222 + .../github.com/stretchr/objx/map_for_test.go | 10 + .../src/github.com/stretchr/objx/map_test.go | 147 + .../src/github.com/stretchr/objx/mutations.go | 81 + .../stretchr/objx/mutations_test.go | 77 + .../src/github.com/stretchr/objx/security.go | 14 + .../github.com/stretchr/objx/security_test.go | 12 + .../stretchr/objx/simple_example_test.go | 41 + .../src/github.com/stretchr/objx/tests.go | 17 + .../github.com/stretchr/objx/tests_test.go | 24 + .../stretchr/objx/type_specific_codegen.go | 2881 +++++++++++ .../objx/type_specific_codegen_test.go | 2867 +++++++++++ .../src/github.com/stretchr/objx/value.go | 13 + .../github.com/stretchr/objx/value_test.go | 1 + .../stretchr/testify/assert/assertions.go | 895 ++++ .../testify/assert/assertions_test.go | 813 ++++ .../github.com/stretchr/testify/assert/doc.go | 154 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 265 ++ .../testify/assert/forward_assertions_test.go | 511 ++ .../testify/assert/http_assertions.go | 157 + .../testify/assert/http_assertions_test.go | 86 + .../github.com/stretchr/testify/mock/doc.go 
| 43 + .../github.com/stretchr/testify/mock/mock.go | 594 +++ .../stretchr/testify/mock/mock_test.go | 908 ++++ .../stretchr/testify/require/doc.go | 77 + .../testify/require/forward_requirements.go | 211 + .../require/forward_requirements_test.go | 260 + .../stretchr/testify/require/requirements.go | 271 ++ .../testify/require/requirements_test.go | 266 ++ .../kafka/consumergroup/consumer_group.go | 419 ++ .../consumergroup/consumer_group_test.go | 67 + .../consumergroup_integration_test.go | 276 ++ .../kafka/consumergroup/offset_manager.go | 268 ++ .../wvanbergen/kafka/consumergroup/utils.go | 113 + .../github.com/wvanbergen/kazoo-go/.gitignore | 2 + .../wvanbergen/kazoo-go/.travis.yml | 31 + .../wvanbergen/kazoo-go/MIT-LICENSE | 21 + .../github.com/wvanbergen/kazoo-go/Makefile | 86 + .../github.com/wvanbergen/kazoo-go/README.md | 6 + .../wvanbergen/kazoo-go/consumergroup.go | 409 ++ .../kazoo-go/functional_cluster_test.go | 98 + .../kazoo-go/functional_consumergroup_test.go | 447 ++ .../functional_topic_metadata_test.go | 111 + .../github.com/wvanbergen/kazoo-go/kazoo.go | 210 + .../wvanbergen/kazoo-go/kazoo_test.go | 44 + .../kazoo-go/tools/kafka-topics/.gitignore | 2 + .../tools/kafka-topics/kafka-topics.go | 93 + .../wvanbergen/kazoo-go/topic_metadata.go | 190 + .../src/golang.org/x/crypto/bcrypt/base64.go | 35 + .../src/golang.org/x/crypto/bcrypt/bcrypt.go | 294 ++ .../golang.org/x/crypto/bcrypt/bcrypt_test.go | 226 + .../src/golang.org/x/crypto/blowfish/block.go | 159 + .../x/crypto/blowfish/blowfish_test.go | 274 ++ .../golang.org/x/crypto/blowfish/cipher.go | 91 + .../src/golang.org/x/crypto/blowfish/const.go | 199 + .../dancannon/gorethink.v1/.gitignore | 27 + .../dancannon/gorethink.v1/.travis.yml | 19 + .../dancannon/gorethink.v1/CHANGELOG.md | 230 + .../gopkg.in/dancannon/gorethink.v1/LICENSE | 191 + .../gopkg.in/dancannon/gorethink.v1/README.md | 270 ++ .../dancannon/gorethink.v1/benchmarks_test.go | 284 ++ 
.../gopkg.in/dancannon/gorethink.v1/buffer.go | 143 + .../dancannon/gorethink.v1/checkers_test.go | 168 + .../dancannon/gorethink.v1/cluster.go | 411 ++ .../gorethink.v1/cluster_integration_test.go | 99 + .../dancannon/gorethink.v1/cluster_test.go | 63 + .../dancannon/gorethink.v1/connection.go | 305 ++ .../gorethink.v1/connection_helper.go | 108 + .../gopkg.in/dancannon/gorethink.v1/cursor.go | 467 ++ .../dancannon/gorethink.v1/cursor_test.go | 398 ++ .../gopkg.in/dancannon/gorethink.v1/doc.go | 6 + .../dancannon/gorethink.v1/encoding/cache.go | 258 + .../gorethink.v1/encoding/decoder.go | 141 + .../gorethink.v1/encoding/decoder_test.go | 426 ++ .../gorethink.v1/encoding/decoder_types.go | 524 ++ .../gorethink.v1/encoding/encoder.go | 89 + .../gorethink.v1/encoding/encoder_test.go | 262 + .../gorethink.v1/encoding/encoder_types.go | 303 ++ .../gorethink.v1/encoding/encoding.go | 32 + .../dancannon/gorethink.v1/encoding/errors.go | 102 + .../dancannon/gorethink.v1/encoding/fold.go | 139 + .../dancannon/gorethink.v1/encoding/tags.go | 69 + .../dancannon/gorethink.v1/encoding/utils.go | 72 + .../gopkg.in/dancannon/gorethink.v1/errors.go | 143 + .../example_query_aggregation_test.go | 113 + .../example_query_control_test.go | 220 + .../gorethink.v1/example_query_db_test.go | 34 + .../example_query_manipulation_test.go | 25 + .../gorethink.v1/example_query_select_test.go | 129 + .../gorethink.v1/example_query_table_test.go | 57 + .../example_query_transformation_test.go | 161 + .../gorethink.v1/example_query_write_test.go | 228 + .../dancannon/gorethink.v1/example_test.go | 30 + .../dancannon/gorethink.v1/gorethink.go | 31 + .../dancannon/gorethink.v1/gorethink_test.go | 434 ++ .../gopkg.in/dancannon/gorethink.v1/host.go | 24 + .../gopkg.in/dancannon/gorethink.v1/node.go | 230 + .../gopkg.in/dancannon/gorethink.v1/pool.go | 539 +++ .../dancannon/gorethink.v1/pool_conn.go | 75 + .../dancannon/gorethink.v1/pseudotypes.go | 220 + .../dancannon/gorethink.v1/ql2/ql2.pb.go | 
1243 +++++ .../dancannon/gorethink.v1/ql2/ql2.proto | 805 ++++ .../gopkg.in/dancannon/gorethink.v1/query.go | 290 ++ .../dancannon/gorethink.v1/query_admin.go | 71 + .../gorethink.v1/query_admin_test.go | 110 + .../gorethink.v1/query_aggregation.go | 169 + .../gorethink.v1/query_aggregation_test.go | 320 ++ .../dancannon/gorethink.v1/query_control.go | 299 ++ .../gorethink.v1/query_control_test.go | 448 ++ .../dancannon/gorethink.v1/query_db.go | 25 + .../dancannon/gorethink.v1/query_db_test.go | 57 + .../gorethink.v1/query_geospatial.go | 170 + .../gorethink.v1/query_geospatial_test.go | 515 ++ .../dancannon/gorethink.v1/query_join.go | 46 + .../dancannon/gorethink.v1/query_join_test.go | 177 + .../gorethink.v1/query_manipulation.go | 117 + .../gorethink.v1/query_manipulation_test.go | 318 ++ .../dancannon/gorethink.v1/query_math.go | 186 + .../dancannon/gorethink.v1/query_math_test.go | 252 + .../dancannon/gorethink.v1/query_select.go | 128 + .../gorethink.v1/query_select_test.go | 492 ++ .../dancannon/gorethink.v1/query_string.go | 44 + .../gorethink.v1/query_string_test.go | 91 + .../dancannon/gorethink.v1/query_table.go | 142 + .../gorethink.v1/query_table_test.go | 255 + .../dancannon/gorethink.v1/query_test.go | 50 + .../dancannon/gorethink.v1/query_time.go | 187 + .../dancannon/gorethink.v1/query_time_test.go | 137 + .../gorethink.v1/query_transformation.go | 163 + .../gorethink.v1/query_transformation_test.go | 426 ++ .../dancannon/gorethink.v1/query_write.go | 97 + .../gorethink.v1/query_write_test.go | 99 + .../dancannon/gorethink.v1/session.go | 190 + .../dancannon/gorethink.v1/session_test.go | 72 + .../dancannon/gorethink.v1/testdata_test.go | 105 + .../dancannon/gorethink.v1/types/geometry.go | 225 + .../gopkg.in/dancannon/gorethink.v1/utils.go | 249 + .../dancannon/gorethink.v1/wercker.yml | 31 + Godeps/_workspace/src/gopkg.in/mgo.v2/LICENSE | 25 + .../_workspace/src/gopkg.in/mgo.v2/Makefile | 5 + .../_workspace/src/gopkg.in/mgo.v2/README.md | 4 + 
Godeps/_workspace/src/gopkg.in/mgo.v2/auth.go | 467 ++ .../src/gopkg.in/mgo.v2/auth_test.go | 1180 +++++ .../src/gopkg.in/mgo.v2/bson/LICENSE | 25 + .../src/gopkg.in/mgo.v2/bson/bson.go | 705 +++ .../src/gopkg.in/mgo.v2/bson/bson_test.go | 1605 +++++++ .../src/gopkg.in/mgo.v2/bson/decode.go | 825 ++++ .../src/gopkg.in/mgo.v2/bson/encode.go | 503 ++ Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go | 71 + .../src/gopkg.in/mgo.v2/bulk_test.go | 131 + .../_workspace/src/gopkg.in/mgo.v2/cluster.go | 632 +++ .../src/gopkg.in/mgo.v2/cluster_test.go | 1657 +++++++ .../src/gopkg.in/mgo.v2/dbtest/dbserver.go | 196 + .../gopkg.in/mgo.v2/dbtest/dbserver_test.go | 108 + .../src/gopkg.in/mgo.v2/dbtest/export_test.go | 12 + Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go | 31 + .../src/gopkg.in/mgo.v2/export_test.go | 33 + .../_workspace/src/gopkg.in/mgo.v2/gridfs.go | 755 +++ .../src/gopkg.in/mgo.v2/gridfs_test.go | 708 +++ .../src/gopkg.in/mgo.v2/internal/sasl/sasl.c | 77 + .../src/gopkg.in/mgo.v2/internal/sasl/sasl.go | 138 + .../mgo.v2/internal/sasl/sasl_windows.c | 118 + .../mgo.v2/internal/sasl/sasl_windows.go | 140 + .../mgo.v2/internal/sasl/sasl_windows.h | 7 + .../mgo.v2/internal/sasl/sspi_windows.c | 96 + .../mgo.v2/internal/sasl/sspi_windows.h | 70 + .../gopkg.in/mgo.v2/internal/scram/scram.go | 266 ++ .../mgo.v2/internal/scram/scram_test.go | 67 + Godeps/_workspace/src/gopkg.in/mgo.v2/log.go | 133 + .../_workspace/src/gopkg.in/mgo.v2/queue.go | 91 + .../src/gopkg.in/mgo.v2/queue_test.go | 101 + .../_workspace/src/gopkg.in/mgo.v2/raceoff.go | 5 + .../_workspace/src/gopkg.in/mgo.v2/raceon.go | 5 + .../src/gopkg.in/mgo.v2/saslimpl.go | 11 + .../src/gopkg.in/mgo.v2/saslstub.go | 11 + .../_workspace/src/gopkg.in/mgo.v2/server.go | 448 ++ .../_workspace/src/gopkg.in/mgo.v2/session.go | 4224 +++++++++++++++++ .../src/gopkg.in/mgo.v2/session_test.go | 3704 +++++++++++++++ .../_workspace/src/gopkg.in/mgo.v2/socket.go | 677 +++ .../_workspace/src/gopkg.in/mgo.v2/stats.go | 147 + 
.../src/gopkg.in/mgo.v2/suite_test.go | 254 + .../src/gopkg.in/mgo.v2/syscall_test.go | 15 + .../gopkg.in/mgo.v2/syscall_windows_test.go | 11 + .../src/gopkg.in/mgo.v2/testdb/client.pem | 44 + .../src/gopkg.in/mgo.v2/testdb/dropall.js | 66 + .../src/gopkg.in/mgo.v2/testdb/init.js | 110 + .../src/gopkg.in/mgo.v2/testdb/server.pem | 33 + .../src/gopkg.in/mgo.v2/testdb/setup.sh | 58 + .../gopkg.in/mgo.v2/testdb/supervisord.conf | 65 + .../src/gopkg.in/mgo.v2/testdb/wait.js | 58 + .../gopkg.in/mgo.v2/testserver/export_test.go | 12 + .../gopkg.in/mgo.v2/testserver/testserver.go | 168 + .../mgo.v2/testserver/testserver_test.go | 108 + .../src/gopkg.in/mgo.v2/txn/chaos.go | 68 + .../src/gopkg.in/mgo.v2/txn/debug.go | 109 + .../src/gopkg.in/mgo.v2/txn/dockey_test.go | 205 + .../src/gopkg.in/mgo.v2/txn/flusher.go | 985 ++++ .../src/gopkg.in/mgo.v2/txn/mgo_test.go | 101 + .../src/gopkg.in/mgo.v2/txn/sim_test.go | 388 ++ .../src/gopkg.in/mgo.v2/txn/tarjan.go | 94 + .../src/gopkg.in/mgo.v2/txn/tarjan_test.go | 44 + .../_workspace/src/gopkg.in/mgo.v2/txn/txn.go | 611 +++ .../src/gopkg.in/mgo.v2/txn/txn_test.go | 753 +++ Makefile | 9 +- circle-test.sh | 64 + circle.yml | 29 +- package.sh | 13 +- 1055 files changed, 258568 insertions(+), 44 deletions(-) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user.a create 
mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user.a create mode 100644 Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/testing.a create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/Makefile create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/broker.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/client.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/config.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go create mode 100644 
Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/errors.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/message.go create mode 
100644 Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go create mode 100644 
Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/request.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go create mode 100644 
Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/utils.go create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/create_topics.sh create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/install_cluster.sh create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh create mode 100644 
Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/server.properties create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/toxiproxy.conf create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/zookeeper.conf create mode 100644 Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/zookeeper.properties create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go create 
mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 
Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/README.md create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/sink.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/start.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go create mode 100644 Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/Makefile create mode 100644 
Godeps/_workspace/src/github.com/boltdb/bolt/README.md create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/db.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/doc.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/errors.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go create mode 100644 
Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/node.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/page.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/tx.go create mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/README.md create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go create mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go create mode 100644 
Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto create mode 100644 Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go create mode 100644 Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md create mode 100644 Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go create mode 100644 Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go create mode 100644 Godeps/_workspace/src/github.com/eapache/queue/.gitignore create mode 100644 Godeps/_workspace/src/github.com/eapache/queue/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/eapache/queue/LICENSE create mode 100644 Godeps/_workspace/src/github.com/eapache/queue/README.md create mode 100644 Godeps/_workspace/src/github.com/eapache/queue/queue.go create mode 100644 
Godeps/_workspace/src/github.com/eapache/queue/queue_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem create mode 120000 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go create mode 100644 
Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go create mode 100644 
Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go create mode 100644 Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go create mode 100644 
Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile create mode 100644 
Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/LICENSE create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/README create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/decode.go create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/encode.go create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/snappy.go create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/README create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md create 
mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml create mode 100644 
Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/Makefile create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/README.md create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/commands.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/config.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/future.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/log.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/peer.go create mode 100644 
Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/raft.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/replication.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/stable.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/state.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/transport.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/util.go create mode 100644 Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/NOTES create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/doc.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/result.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/token.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/errors.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.pb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.proto create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/proxy.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/state.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/snapshot/snapshot.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/snapshot/snapshot_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/toml/toml.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/toml/toml_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/batcher.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/batcher_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/cursor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/cursor_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/doc.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/b1/b1.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/b1/b1_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/bz1/bz1.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/bz1/bz1_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/engine.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/executor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/executor_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/internal/meta.pb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/internal/meta.proto create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/mapper.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/mapper_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/meta.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/meta_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/monitor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/points.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/points_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/query_executor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/query_executor_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/shard.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/shard_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/snapshot_writer.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/store.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/store_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/.gitignore create mode 100644 Godeps/_workspace/src/github.com/lib/pq/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/lib/pq/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/lib/pq/LICENSE.md create mode 100644 Godeps/_workspace/src/github.com/lib/pq/README.md create mode 100644 Godeps/_workspace/src/github.com/lib/pq/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/buf.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/certs/README create mode 100644 Godeps/_workspace/src/github.com/lib/pq/certs/postgresql.crt create mode 100644 Godeps/_workspace/src/github.com/lib/pq/certs/postgresql.key create mode 100644 Godeps/_workspace/src/github.com/lib/pq/certs/root.crt create mode 100644 
Godeps/_workspace/src/github.com/lib/pq/certs/server.crt create mode 100644 Godeps/_workspace/src/github.com/lib/pq/certs/server.key create mode 100644 Godeps/_workspace/src/github.com/lib/pq/conn.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/conn_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/copy.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/copy_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/doc.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/encode.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/encode_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/error.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/hstore/hstore.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/hstore/hstore_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/listen_example/doc.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/notify.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/notify_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/oid/doc.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/oid/gen.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/oid/types.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/ssl_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/url.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/url_test.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/user_posix.go create mode 100644 Godeps/_workspace/src/github.com/lib/pq/user_windows.go create mode 100644 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go create mode 100644 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 
Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/LICENSE create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/README.md create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/strings.go create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/LICENSE create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/Makefile create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/README.md create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/ast/ast.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/decode.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/decode_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/decode_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/encode.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/encode_test.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/error.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/parse.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/parse.peg create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/parse.peg.go create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/testdata/test.toml create mode 100644 Godeps/_workspace/src/github.com/naoina/toml/util.go create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_0 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_1 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_2 create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_3 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_4 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_0 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_1 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_10 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_11 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_12 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_13 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_14 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_15 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_16 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_17 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_18 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_19 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_2 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_3 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_4 create mode 
100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_5 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_6 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_7 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_8 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/from_test_parse_error_9 create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/fuzz/corpus/minimal create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go create mode 100644 
Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go create mode 100644 Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/.gitignore create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/README.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/constants.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go 
create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go create mode 100644 
Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/requirements_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kafka/consumergroup/consumer_group.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kafka/consumergroup/consumer_group_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kafka/consumergroup/consumergroup_integration_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kafka/consumergroup/offset_manager.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kafka/consumergroup/utils.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/.gitignore create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/MIT-LICENSE create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/Makefile create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/README.md create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/consumergroup.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/functional_cluster_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/functional_consumergroup_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/functional_topic_metadata_test.go create mode 100644 
Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/kazoo.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/kazoo_test.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/tools/kafka-topics/.gitignore create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/tools/kafka-topics/kafka-topics.go create mode 100644 Godeps/_workspace/src/github.com/wvanbergen/kazoo-go/topic_metadata.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/bcrypt/base64.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/.gitignore create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/CHANGELOG.md create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/benchmarks_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/buffer.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/checkers_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/cluster.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/cluster_integration_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/cluster_test.go create mode 100644 
Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/connection.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/connection_helper.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/cursor.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/cursor_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/doc.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/cache.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/decoder.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/decoder_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/decoder_types.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/encoder.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/encoder_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/encoder_types.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/encoding.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/errors.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/fold.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/tags.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/encoding/utils.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/errors.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_aggregation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_control_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_db_test.go create mode 100644 
Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_manipulation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_select_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_table_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_transformation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_query_write_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/example_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/gorethink.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/gorethink_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/host.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/node.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/pool.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/pool_conn.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/pseudotypes.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/ql2/ql2.pb.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/ql2/ql2.proto create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_admin.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_admin_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_aggregation.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_aggregation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_control.go create mode 100644 
Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_control_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_db.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_db_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_geospatial.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_geospatial_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_join.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_join_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_manipulation.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_manipulation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_math.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_math_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_select.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_select_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_string.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_string_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_table.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_table_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_time.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_time_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_transformation.go create mode 100644 
Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_transformation_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_write.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/query_write_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/session.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/session_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/testdata_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/types/geometry.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/utils.go create mode 100644 Godeps/_workspace/src/gopkg.in/dancannon/gorethink.v1/wercker.yml create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/Makefile create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/auth.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/auth_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bulk_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/cluster_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/dbserver.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/dbtest/export_test.go create 
mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.c create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/log.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/queue.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/queue_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/raceoff.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/raceon.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/saslstub.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/server.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/session.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/session_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/stats.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/suite_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_test.go create mode 
100644 Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_windows_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/testserver.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/testserver/testserver_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/chaos.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/debug.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/dockey_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/mgo_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/sim_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go create mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn_test.go create mode 100755 circle-test.sh diff --git a/.gitignore b/.gitignore index a471ffe03..d69f9330b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -pkg/ tivan .vagrant telegraf diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 000000000..a4544d3aa --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,210 @@ +{ + "ImportPath": "github.com/influxdb/telegraf", + "GoVersion": "go1.4.2", + "Packages": 
[ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/Shopify/sarama", + "Comment": "v1.4.3-45-g5b18996", + "Rev": "5b18996ef1cd555a60562ae4c5d7843ae137e12d" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.8.6-7-g9c060de", + "Rev": "9c060de643590dae45da9d7c26276463bfc46fa0" + }, + { + "ImportPath": "github.com/armon/go-metrics", + "Rev": "b2d95e5291cdbc26997d1301a5e467ecbb240e25" + }, + { + "ImportPath": "github.com/boltdb/bolt", + "Comment": "v1.0-117-g0f053fa", + "Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301" + }, + { + "ImportPath": "github.com/cenkalti/backoff", + "Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99" + }, + { + "ImportPath": "github.com/dancannon/gorethink/encoding", + "Comment": "v1.x.x-1-g786f12a", + "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" + }, + { + "ImportPath": "github.com/dancannon/gorethink/ql2", + "Comment": "v1.x.x-1-g786f12a", + "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" + }, + { + "ImportPath": "github.com/dancannon/gorethink/types", + "Comment": "v1.x.x-1-g786f12a", + "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" + }, + { + "ImportPath": "github.com/eapache/go-resiliency/breaker", + "Comment": "v1.0.0-1-ged0319b", + "Rev": "ed0319b32e66e3295db52695ba3ee493e823fbfe" + }, + { + "ImportPath": "github.com/eapache/queue", + "Comment": "v1.0.2", + "Rev": "ded5959c0d4e360646dc9e9908cff48666781367" + }, + { + "ImportPath": "github.com/fsouza/go-dockerclient", + "Rev": "42d06e2b125654477366c320dcea99107a86e9c2" + }, + { + "ImportPath": "github.com/go-sql-driver/mysql", + "Comment": "v1.2-118-g3dd7008", + "Rev": "3dd7008ac1529aca1bcd8a9db75228a71ba23cac" + }, + { + "ImportPath": "github.com/gogo/protobuf/proto", + "Rev": "cabd153b69f71bab8b89fd667a2d9bb28c92ceb4" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "73aaaa9eb61d74fbf7e256ca586a3a565b308eea" + }, + { + "ImportPath": "github.com/golang/snappy", + "Rev": "723cc1e459b8eea2dea4583200fd60757d40097a" + }, + { + 
"ImportPath": "github.com/gonuts/go-shellquote", + "Rev": "e842a11b24c6abfb3dd27af69a17f482e4b483c2" + }, + { + "ImportPath": "github.com/hashicorp/go-msgpack/codec", + "Rev": "fa3f63826f7c23912c15263591e65d54d080b458" + }, + { + "ImportPath": "github.com/hashicorp/raft", + "Rev": "9b586e29edf1ed085b11da7772479ee45c433996" + }, + { + "ImportPath": "github.com/hashicorp/raft-boltdb", + "Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee" + }, + { + "ImportPath": "github.com/influxdb/influxdb/client", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/influxdb/influxdb/influxql", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/influxdb/influxdb/meta", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/influxdb/influxdb/snapshot", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/influxdb/influxdb/toml", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/influxdb/influxdb/tsdb", + "Comment": "v0.9.1-rc1-536-g1548f62", + "Rev": "1548f6289f2f0d96178f23e14bf72f6dae0eb437" + }, + { + "ImportPath": "github.com/lib/pq", + "Comment": "go1.0-cutoff-59-gb269bd0", + "Rev": "b269bd035a727d6c1081f76e7a239a1b00674c40" + }, + { + "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" + }, + { + "ImportPath": "github.com/naoina/go-stringutil", + "Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761" + }, + { + "ImportPath": "github.com/naoina/toml", + "Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e" + }, + { + "ImportPath": "github.com/prometheus/client_golang/extraction", + "Comment": "0.7.0-22-gbbd006b", + "Rev": 
"bbd006bc5e64ea2c807381d50263be5f230b427d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/model", + "Comment": "0.7.0-22-gbbd006b", + "Rev": "bbd006bc5e64ea2c807381d50263be5f230b427d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/text", + "Comment": "0.7.0-22-gbbd006b", + "Rev": "bbd006bc5e64ea2c807381d50263be5f230b427d" + }, + { + "ImportPath": "github.com/prometheus/client_model/go", + "Comment": "model-0.0.2-12-gfa8ad6f", + "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" + }, + { + "ImportPath": "github.com/samuel/go-zookeeper/zk", + "Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4" + }, + { + "ImportPath": "github.com/stretchr/objx", + "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" + }, + { + "ImportPath": "github.com/stretchr/testify/assert", + "Comment": "v1.0-21-gf552045", + "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" + }, + { + "ImportPath": "github.com/stretchr/testify/mock", + "Comment": "v1.0-21-gf552045", + "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" + }, + { + "ImportPath": "github.com/stretchr/testify/require", + "Comment": "v1.0-21-gf552045", + "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" + }, + { + "ImportPath": "github.com/wvanbergen/kafka/consumergroup", + "Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d" + }, + { + "ImportPath": "github.com/wvanbergen/kazoo-go", + "Rev": "02a3868e9b87153285439cd27a39c0a2984a13af" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd" + }, + { + "ImportPath": "gopkg.in/dancannon/gorethink.v1", + "Comment": "v1.x.x", + "Rev": "8aca6ba2cc6e873299617d730fac0d7f6593113a" + }, + { + "ImportPath": "gopkg.in/mgo.v2", + "Comment": "r2015.06.03-3-g3569c88", + "Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- 
/dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 000000000..f037d684e --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient.a b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient.a new file mode 100644 index 000000000..9ecf800ae --- /dev/null +++ b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient.a @@ -0,0 +1,1806 @@ +! +__.PKGDEF 0 0 0 644 98955 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package docker + import net "net" + import ioutil "io/ioutil" + import sync "sync" + import runtime "runtime" + import tls "crypto/tls" + import bufio "bufio" + import time "time" + import archive "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" + import url "net/url" + import errors "errors" + import http "net/http" + import io "io" + import math "math" + import fileutils "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" + import x509 "crypto/x509" + import base64 "encoding/base64" + import filepath "path/filepath" + import os "os" + import strconv "strconv" + import strings "strings" + import httputil "net/http/httputil" + import homedir "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" + import stdcopy "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" + import fmt "fmt" + import reflect "reflect" + import atomic "sync/atomic" + import opts "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" + import json "encoding/json" + import bytes "bytes" + import path "path" + var @"".AuthParseError error + type @"".AuthConfiguration struct { Username 
string "json:\"username,omitempty\""; Password string "json:\"password,omitempty\""; Email string "json:\"email,omitempty\""; ServerAddress string "json:\"serveraddress,omitempty\"" } + type @"".AuthConfigurations struct { Configs map[string]@"".AuthConfiguration "json:\"configs\"" } + func @"".NewAuthConfigurationsFromDockerCfg () (? *@"".AuthConfigurations, ? error) + type @"io".Reader interface { Read(@"io".p []byte) (@"io".n int, @"io".err error) } + func @"".NewAuthConfigurations (@"".r·3 @"io".Reader) (? *@"".AuthConfigurations, ? error) + type @"".ChangeType int + const @"".ChangeModify @"".ChangeType = 0x0 + const @"".ChangeAdd @"".ChangeType = 0x1 + const @"".ChangeDelete @"".ChangeType = 0x2 + type @"".Change struct { Path string; Kind @"".ChangeType } + func (@"".change·2 *@"".Change) String () (? string) + var @"".ErrInvalidEndpoint error + var @"".ErrConnectionRefused error + type @"".APIVersion []int + func (@"".version·2 @"".APIVersion "esc:0x0") GreaterThan (@"".other·3 @"".APIVersion "esc:0x0") (? bool) + func (@"".version·2 @"".APIVersion "esc:0x0") GreaterThanOrEqualTo (@"".other·3 @"".APIVersion "esc:0x0") (? bool) + func (@"".version·2 @"".APIVersion "esc:0x0") LessThan (@"".other·3 @"".APIVersion "esc:0x0") (? bool) + func (@"".version·2 @"".APIVersion "esc:0x0") LessThanOrEqualTo (@"".other·3 @"".APIVersion "esc:0x0") (? bool) + func (@"".version·2 @"".APIVersion "esc:0x0") String () (? string) + func (@"".version·2 @"".APIVersion "esc:0x0") @"".compare (@"".other·3 @"".APIVersion "esc:0x0") (? int) + func @"".NewAPIVersion (@"".input·3 string) (? @"".APIVersion, ? error) + type @"io".Writer interface { Write(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"net/http".keyValues struct { @"net/http".key string; @"net/http".values []string } + type @"net/http".headerSorter struct { @"net/http".kvs []@"net/http".keyValues } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Len () (? 
int) { return len(@"net/http".s·2.@"net/http".kvs) } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Less (@"net/http".i·3 int, @"net/http".j·4 int) (? bool) { return @"net/http".s·2.@"net/http".kvs[@"net/http".i·3].@"net/http".key < @"net/http".s·2.@"net/http".kvs[@"net/http".j·4].@"net/http".key } + func (@"net/http".s·1 *@"net/http".headerSorter "esc:0x0") Swap (@"net/http".i·2 int, @"net/http".j·3 int) { @"net/http".s·1.@"net/http".kvs[@"net/http".i·2], @"net/http".s·1.@"net/http".kvs[@"net/http".j·3] = @"net/http".s·1.@"net/http".kvs[@"net/http".j·3], @"net/http".s·1.@"net/http".kvs[@"net/http".i·2] } + type @"net/http".Header map[string][]string + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Add (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Del (@"net/http".key·2 string "esc:0x0") + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Get (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Set (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") WriteSubset (@"net/http".w·3 @"io".Writer, @"net/http".exclude·4 map[string]bool "esc:0x0") (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".clone () (? @"net/http".Header) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".get (@"net/http".key·3 string "esc:0x0") (? 
string) { var @"net/http".v·4 []string; ; @"net/http".v·4 = @"net/http".h·2[@"net/http".key·3]; if len(@"net/http".v·4) > 0x0 { return @"net/http".v·4[0x0] }; return "" } + func (@"net/http".h·3 @"net/http".Header "esc:0x0") @"net/http".sortedKeyValues (@"net/http".exclude·4 map[string]bool "esc:0x0") (@"net/http".kvs·1 []@"net/http".keyValues, @"net/http".hs·2 *@"net/http".headerSorter) + type @"io".ReadCloser interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"net/url".Userinfo struct { @"net/url".username string; @"net/url".password string; @"net/url".passwordSet bool } + func (@"net/url".u·3 *@"net/url".Userinfo "esc:0x1") Password () (? string, ? bool) { if @"net/url".u·3.@"net/url".passwordSet { return @"net/url".u·3.@"net/url".password, true }; return "", false } + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") String () (? string) + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") Username () (? string) { return @"net/url".u·2.@"net/url".username } + type @"net/url".Values map[string][]string + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Add (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = append(@"net/url".v·1[@"net/url".key·2], @"net/url".value·3) } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Del (@"net/url".key·2 string "esc:0x0") { delete(@"net/url".v·1, @"net/url".key·2) } + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Encode () (? string) + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Get (@"net/url".key·3 string "esc:0x0") (? 
string) { if @"net/url".v·2 == nil { return "" }; var @"net/url".vs·4 []string; ; var @"net/url".ok·5 bool; ; @"net/url".vs·4, @"net/url".ok·5 = @"net/url".v·2[@"net/url".key·3]; if !@"net/url".ok·5 || len(@"net/url".vs·4) == 0x0 { return "" }; return @"net/url".vs·4[0x0] } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Set (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = ([]string{ 0x0:@"net/url".value·3 }) } + type @"net/url".URL struct { Scheme string; Opaque string; User *@"net/url".Userinfo; Host string; Path string; RawQuery string; Fragment string } + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") IsAbs () (? bool) { return @"net/url".u·2.Scheme != "" } + func (@"net/url".u·3 *@"net/url".URL "esc:0x2") Parse (@"net/url".ref·4 string) (? *@"net/url".URL, ? error) + func (@"net/url".u·2 *@"net/url".URL) Query () (? @"net/url".Values) + func (@"net/url".u·2 *@"net/url".URL "esc:0x1") RequestURI () (? string) + func (@"net/url".u·2 *@"net/url".URL "esc:0x2") ResolveReference (@"net/url".ref·3 *@"net/url".URL "esc:0x2") (? *@"net/url".URL) + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") String () (? string) + import multipart "mime/multipart" // indirect + import textproto "net/textproto" // indirect + type @"net/textproto".MIMEHeader map[string][]string + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Add (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Del (@"net/textproto".key·2 string "esc:0x0") + func (@"net/textproto".h·2 @"net/textproto".MIMEHeader "esc:0x0") Get (@"net/textproto".key·3 string "esc:0x0") (? string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Set (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + type @"mime/multipart".File interface { Close() (? 
error); Read(@"io".p []byte) (@"io".n int, @"io".err error); ReadAt(@"io".p []byte, @"io".off int64) (@"io".n int, @"io".err error); Seek(@"io".offset int64, @"io".whence int) (? int64, ? error) } + type @"mime/multipart".FileHeader struct { Filename string; Header @"net/textproto".MIMEHeader; @"mime/multipart".content []byte; @"mime/multipart".tmpfile string } + func (@"mime/multipart".fh·3 *@"mime/multipart".FileHeader) Open () (? @"mime/multipart".File, ? error) + type @"mime/multipart".Form struct { Value map[string][]string; File map[string][]*@"mime/multipart".FileHeader } + func (@"mime/multipart".f·2 *@"mime/multipart".Form "esc:0x0") RemoveAll () (? error) + type @"crypto/x509".SignatureAlgorithm int + type @"crypto/x509".PublicKeyAlgorithm int + import big "math/big" // indirect + type @"math/big".Word uintptr + type @"math/big".divisor struct { @"math/big".bbb @"math/big".nat; @"math/big".nbits int; @"math/big".ndigits int } + import rand "math/rand" // indirect + type @"math/rand".Source interface { Int63() (? int64); Seed(@"math/rand".seed int64) } + type @"math/rand".Rand struct { @"math/rand".src @"math/rand".Source } + func (@"math/rand".r·2 *@"math/rand".Rand) ExpFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Float32 () (? float32) + func (@"math/rand".r·2 *@"math/rand".Rand) Float64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int () (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31 () (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31n (@"math/rand".n·3 int32) (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63 () (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63n (@"math/rand".n·3 int64) (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Intn (@"math/rand".n·3 int) (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) NormFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Perm (@"math/rand".n·3 int) (? 
[]int) + func (@"math/rand".r·1 *@"math/rand".Rand) Seed (@"math/rand".seed·2 int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Uint32 () (? uint32) + type @"io".RuneScanner interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error); UnreadRune() (? error) } + type @"math/big".nat []@"math/big".Word + func (@"math/big".z·2 @"math/big".nat) @"math/big".add (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".and (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".andNot (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bit (@"math/big".i·3 uint) (? uint) { var @"math/big".j·4 int; ; @"math/big".j·4 = int(@"math/big".i·3 / 0x40); if @"math/big".j·4 >= len(@"math/big".z·2) { return 0x0 }; return uint(@"math/big".z·2[@"math/big".j·4] >> (@"math/big".i·3 % 0x40) & @"math/big".Word(0x1)) } + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".bitLen () (? int) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bytes (@"math/big".buf·3 []byte "esc:0x0") (@"math/big".i·1 int) + func (@"math/big".z·1 @"math/big".nat "esc:0x0") @"math/big".clear () + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".cmp (@"math/big".y·3 @"math/big".nat "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".q·1 @"math/big".nat) @"math/big".convertWords (@"math/big".s·2 []byte "esc:0x0", @"math/big".charset·3 string "esc:0x0", @"math/big".b·4 @"math/big".Word, @"math/big".ndigits·5 int, @"math/big".bb·6 @"math/big".Word, @"math/big".table·7 []@"math/big".divisor "esc:0x0") + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".decimalString () (? 
string) + func (@"math/big".z·3 @"math/big".nat) @"math/big".div (@"math/big".z2·4 @"math/big".nat, @"math/big".u·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat "esc:0x2") @"math/big".divLarge (@"math/big".u·4 @"math/big".nat, @"math/big".uIn·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat) @"math/big".divW (@"math/big".x·4 @"math/big".nat, @"math/big".y·5 @"math/big".Word) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNN (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNNWindowed (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expWW (@"math/big".x·3 @"math/big".Word, @"math/big".y·4 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".make (@"math/big".n·3 int) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat) @"math/big".modW (@"math/big".d·3 @"math/big".Word) (@"math/big".r·1 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mul (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulAddWW (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".Word, @"math/big".r·5 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulRange (@"math/big".a·3 uint64, @"math/big".b·4 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".norm () (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".or (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".n·2 @"math/big".nat) @"math/big".probablyPrime (@"math/big".reps·3 int) (? bool) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".random (@"math/big".rand·3 *@"math/rand".Rand, @"math/big".limit·4 @"math/big".nat "esc:0x0", @"math/big".n·5 int) (? @"math/big".nat) + func (@"math/big".z·4 @"math/big".nat) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? @"math/big".nat, ? int, ? error) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".set (@"math/big".x·3 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBit (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".i·4 uint, @"math/big".b·5 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBytes (@"math/big".buf·3 []byte "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setUint64 (@"math/big".x·3 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setWord (@"math/big".x·3 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shl (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shr (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".string (@"math/big".charset·3 string "esc:0x0") (? string) + func (@"math/big".z·2 @"math/big".nat) @"math/big".sub (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".trailingZeroBits () (? 
uint) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".xor (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + type @"fmt".State interface { Flag(@"fmt".c int) (? bool); Precision() (@"fmt".prec int, @"fmt".ok bool); Width() (@"fmt".wid int, @"fmt".ok bool); Write(@"fmt".b []byte) (@"fmt".ret int, @"fmt".err error) } + type @"fmt".ScanState interface { Read(@"fmt".buf []byte) (@"fmt".n int, @"fmt".err error); ReadRune() (@"fmt".r rune, @"fmt".size int, @"fmt".err error); SkipSpace(); Token(@"fmt".skipSpace bool, @"fmt".f func(? rune) (? bool)) (@"fmt".token []byte, @"fmt".err error); UnreadRune() (? error); Width() (@"fmt".wid int, @"fmt".ok bool) } + type @"math/big".Int struct { @"math/big".neg bool; @"math/big".abs @"math/big".nat } + func (@"math/big".z·2 *@"math/big".Int) Abs (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Add (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) And (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) AndNot (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Binomial (@"math/big".n·3 int64, @"math/big".k·4 int64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) Bit (@"math/big".i·3 int) (? uint) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") BitLen () (? int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x1") Bits () (? []@"math/big".Word) { return @"math/big".x·2.@"math/big".abs } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Bytes () (? 
[]byte) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Cmp (@"math/big".y·3 *@"math/big".Int "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".z·2 *@"math/big".Int) Div (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) DivMod (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".m·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Exp (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int "esc:0x0", @"math/big".m·5 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·1 *@"math/big".Int "esc:0x0") Format (@"math/big".s·2 @"fmt".State, @"math/big".ch·3 rune) + func (@"math/big".z·2 *@"math/big".Int) GCD (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int, @"math/big".a·5 *@"math/big".Int, @"math/big".b·6 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) GobDecode (@"math/big".buf·3 []byte "esc:0x0") (? error) + func (@"math/big".x·3 *@"math/big".Int "esc:0x0") GobEncode () (? []byte, ? error) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Int64 () (? int64) + func (@"math/big".z·2 *@"math/big".Int) Lsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalText () (@"math/big".text·1 []byte, @"math/big".err·2 error) + func (@"math/big".z·2 *@"math/big".Int) Mod (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) ModInverse (@"math/big".g·3 *@"math/big".Int, @"math/big".n·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Mul (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? 
*@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) MulRange (@"math/big".a·3 int64, @"math/big".b·4 int64) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Neg (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Not (@"math/big".x·3 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Or (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) ProbablyPrime (@"math/big".n·3 int) (? bool) + func (@"math/big".z·2 *@"math/big".Int) Quo (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) QuoRem (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".r·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rand (@"math/big".rnd·3 *@"math/rand".Rand, @"math/big".n·4 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rem (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Scan (@"math/big".s·3 @"fmt".ScanState, @"math/big".ch·4 rune) (? error) + func (@"math/big".z·2 *@"math/big".Int) Set (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBit (@"math/big".x·3 *@"math/big".Int, @"math/big".i·4 int, @"math/big".b·5 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int "esc:0x2") SetBits (@"math/big".abs·3 []@"math/big".Word) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBytes (@"math/big".buf·3 []byte "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetInt64 (@"math/big".x·3 int64) (? 
*@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) SetString (@"math/big".s·4 string, @"math/big".base·5 int) (? *@"math/big".Int, ? bool) + func (@"math/big".z·2 *@"math/big".Int) SetUint64 (@"math/big".x·3 uint64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Sign () (? int) { if len(@"math/big".x·2.@"math/big".abs) == 0x0 { return 0x0 }; if @"math/big".x·2.@"math/big".neg { return -0x1 }; return 0x1 } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") String () (? string) + func (@"math/big".z·2 *@"math/big".Int) Sub (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Uint64 () (? uint64) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalJSON (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalText (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) Xor (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) @"math/big".binaryGCD (@"math/big".a·3 *@"math/big".Int, @"math/big".b·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·4 *@"math/big".Int) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? *@"math/big".Int, ? int, ? error) + import pkix "crypto/x509/pkix" // indirect + import asn1 "encoding/asn1" // indirect + type @"encoding/asn1".ObjectIdentifier []int + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") Equal (@"encoding/asn1".other·3 @"encoding/asn1".ObjectIdentifier "esc:0x0") (? bool) + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") String () (? 
string) + type @"crypto/x509/pkix".AttributeTypeAndValue struct { Type @"encoding/asn1".ObjectIdentifier; Value interface {} } + type @"crypto/x509/pkix".RelativeDistinguishedNameSET []@"crypto/x509/pkix".AttributeTypeAndValue + type @"crypto/x509/pkix".RDNSequence []@"crypto/x509/pkix".RelativeDistinguishedNameSET + type @"crypto/x509/pkix".Name struct { Country []string; Organization []string; OrganizationalUnit []string; Locality []string; Province []string; StreetAddress []string; PostalCode []string; SerialNumber string; CommonName string; Names []@"crypto/x509/pkix".AttributeTypeAndValue } + func (@"crypto/x509/pkix".n·1 *@"crypto/x509/pkix".Name) FillFromRDNSequence (@"crypto/x509/pkix".rdns·2 *@"crypto/x509/pkix".RDNSequence "esc:0x0") + func (@"crypto/x509/pkix".n·2 @"crypto/x509/pkix".Name) ToRDNSequence () (@"crypto/x509/pkix".ret·1 @"crypto/x509/pkix".RDNSequence) + type @"time".zone struct { @"time".name string; @"time".offset int; @"time".isDST bool } + type @"time".zoneTrans struct { @"time".when int64; @"time".index uint8; @"time".isstd bool; @"time".isutc bool } + type @"time".Location struct { @"time".name string; @"time".zone []@"time".zone; @"time".tx []@"time".zoneTrans; @"time".cacheStart int64; @"time".cacheEnd int64; @"time".cacheZone *@"time".zone } + func (@"time".l·2 *@"time".Location "esc:0x0") String () (? string) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".firstZoneUsed () (? bool) + func (@"time".l·2 *@"time".Location "esc:0x2") @"time".get () (? *@"time".Location) + func (@"time".l·6 *@"time".Location "esc:0x1") @"time".lookup (@"time".sec·7 int64) (@"time".name·1 string, @"time".offset·2 int, @"time".isDST·3 bool, @"time".start·4 int64, @"time".end·5 int64) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".lookupFirstZone () (? 
int) + func (@"time".l·4 *@"time".Location "esc:0x0") @"time".lookupName (@"time".name·5 string "esc:0x0", @"time".unix·6 int64) (@"time".offset·1 int, @"time".isDST·2 bool, @"time".ok·3 bool) + type @"time".Duration int64 + func (@"time".d·2 @"time".Duration) Hours () (? float64) { var @"time".hour·3 @"time".Duration; ; @"time".hour·3 = @"time".d·2 / @"time".Duration(0x34630B8A000); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x34630B8A000); return float64(@"time".hour·3) + float64(@"time".nsec·4) * 0x9C5FFF26ED75Fp-93 } + func (@"time".d·2 @"time".Duration) Minutes () (? float64) { var @"time".min·3 @"time".Duration; ; @"time".min·3 = @"time".d·2 / @"time".Duration(0xDF8475800); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0xDF8475800); return float64(@"time".min·3) + float64(@"time".nsec·4) * 0x9299FF347E9E9p-87 } + func (@"time".d·2 @"time".Duration) Nanoseconds () (? int64) { return int64(@"time".d·2) } + func (@"time".d·2 @"time".Duration) Seconds () (? float64) { var @"time".sec·3 @"time".Duration; ; @"time".sec·3 = @"time".d·2 / @"time".Duration(0x3B9ACA00); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x3B9ACA00); return float64(@"time".sec·3) + float64(@"time".nsec·4) * 0x112E0BE826D695p-82 } + func (@"time".d·2 @"time".Duration) String () (? string) + type @"time".Month int + func (@"time".m·2 @"time".Month) String () (? string) { return @"time".months[@"time".m·2 - @"time".Month(0x1)] } + type @"time".Weekday int + func (@"time".d·2 @"time".Weekday) String () (? string) { return @"time".days[@"time".d·2] } + type @"time".Time struct { @"time".sec int64; @"time".nsec int32; @"time".loc *@"time".Location } + func (@"time".t·2 @"time".Time "esc:0x2") Add (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") AddDate (@"time".years·3 int, @"time".months·4 int, @"time".days·5 int) (? 
@"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") After (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec > @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec > @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Before (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec < @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec < @"time".u·3.@"time".nsec } + func (@"time".t·4 @"time".Time "esc:0x0") Clock () (@"time".hour·1 int, @"time".min·2 int, @"time".sec·3 int) + func (@"time".t·4 @"time".Time "esc:0x0") Date () (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int) + func (@"time".t·2 @"time".Time "esc:0x0") Day () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Equal (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec == @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Format (@"time".layout·3 string "esc:0x0") (? string) + func (@"time".t·2 *@"time".Time "esc:0x0") GobDecode (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·3 @"time".Time "esc:0x0") GobEncode () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Hour () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") ISOWeek () (@"time".year·1 int, @"time".week·2 int) + func (@"time".t·2 @"time".Time "esc:0x2") In (@"time".loc·3 *@"time".Location "esc:0x2") (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") IsZero () (? bool) { return @"time".t·2.@"time".sec == 0x0 && @"time".t·2.@"time".nsec == 0x0 } + func (@"time".t·2 @"time".Time "esc:0x2") Local () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".Local; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x2") Location () (? 
*@"time".Location) { var @"time".l·3 *@"time".Location; ; @"time".l·3 = @"time".t·2.@"time".loc; if @"time".l·3 == nil { @"time".l·3 = @"time".UTC }; return @"time".l·3 } + func (@"time".t·3 @"time".Time "esc:0x0") MarshalBinary () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalText () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Minute () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Month () (? @"time".Month) + func (@"time".t·2 @"time".Time "esc:0x0") Nanosecond () (? int) { return int(@"time".t·2.@"time".nsec) } + func (@"time".t·2 @"time".Time "esc:0x2") Round (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") Second () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") String () (? string) + func (@"time".t·2 @"time".Time "esc:0x0") Sub (@"time".u·3 @"time".Time "esc:0x0") (? @"time".Duration) + func (@"time".t·2 @"time".Time "esc:0x2") Truncate (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") UTC () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".UTC; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x0") Unix () (? int64) { return @"time".t·2.@"time".sec + -0xE7791F700 } + func (@"time".t·2 @"time".Time "esc:0x0") UnixNano () (? int64) { return (@"time".t·2.@"time".sec + -0xE7791F700) * 0x3B9ACA00 + int64(@"time".t·2.@"time".nsec) } + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalBinary (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalJSON (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalText (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 @"time".Time "esc:0x0") Weekday () (? @"time".Weekday) + func (@"time".t·2 @"time".Time "esc:0x0") Year () (? 
int) + func (@"time".t·2 @"time".Time "esc:0x0") YearDay () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") Zone () (@"time".name·1 string, @"time".offset·2 int) + func (@"time".t·2 @"time".Time "esc:0x0") @"time".abs () (? uint64) + func (@"time".t·5 @"time".Time "esc:0x0") @"time".date (@"time".full·6 bool) (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int, @"time".yday·4 int) + func (@"time".t·4 @"time".Time "esc:0x1") @"time".locabs () (@"time".name·1 string, @"time".offset·2 int, @"time".abs·3 uint64) + type @"crypto/x509".KeyUsage int + type @"crypto/x509/pkix".Extension struct { Id @"encoding/asn1".ObjectIdentifier; Critical bool "asn1:\"optional\""; Value []byte } + type @"crypto/x509".ExtKeyUsage int + type @"net".IPMask []byte + func (@"net".m·3 @"net".IPMask "esc:0x0") Size () (@"net".ones·1 int, @"net".bits·2 int) + func (@"net".m·2 @"net".IPMask "esc:0x0") String () (? string) + type @"net".IP []byte + func (@"net".ip·2 @"net".IP "esc:0x0") DefaultMask () (? @"net".IPMask) + func (@"net".ip·2 @"net".IP "esc:0x0") Equal (@"net".x·3 @"net".IP "esc:0x0") (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsGlobalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsInterfaceLocalMulticast () (? bool) { return len(@"net".ip·2) == 0x10 && @"net".ip·2[0x0] == byte(0xFF) && @"net".ip·2[0x1] & byte(0xF) == byte(0x1) } + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLoopback () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsUnspecified () (? bool) + func (@"net".ip·3 @"net".IP "esc:0x0") MarshalText () (? []byte, ? error) + func (@"net".ip·2 @"net".IP "esc:0x0") Mask (@"net".mask·3 @"net".IPMask "esc:0x0") (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x0") String () (? 
string) + func (@"net".ip·2 @"net".IP "esc:0x2") To16 () (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x2") To4 () (? @"net".IP) + func (@"net".ip·2 *@"net".IP "esc:0x0") UnmarshalText (@"net".text·3 []byte "esc:0x0") (? error) + type @"encoding/asn1".RawContent []byte + type @"encoding/asn1".RawValue struct { Class int; Tag int; IsCompound bool; Bytes []byte; FullBytes []byte } + type @"crypto/x509/pkix".AlgorithmIdentifier struct { Algorithm @"encoding/asn1".ObjectIdentifier; Parameters @"encoding/asn1".RawValue "asn1:\"optional\"" } + type @"crypto/x509/pkix".RevokedCertificate struct { SerialNumber *@"math/big".Int; RevocationTime @"time".Time; Extensions []@"crypto/x509/pkix".Extension "asn1:\"optional\"" } + type @"crypto/x509/pkix".TBSCertificateList struct { Raw @"encoding/asn1".RawContent; Version int "asn1:\"optional,default:2\""; Signature @"crypto/x509/pkix".AlgorithmIdentifier; Issuer @"crypto/x509/pkix".RDNSequence; ThisUpdate @"time".Time; NextUpdate @"time".Time "asn1:\"optional\""; RevokedCertificates []@"crypto/x509/pkix".RevokedCertificate "asn1:\"optional\""; Extensions []@"crypto/x509/pkix".Extension "asn1:\"tag:0,optional,explicit\"" } + type @"encoding/asn1".BitString struct { Bytes []byte; BitLength int } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x0") At (@"encoding/asn1".i·3 int) (? int) { if @"encoding/asn1".i·3 < 0x0 || @"encoding/asn1".i·3 >= @"encoding/asn1".b·2.BitLength { return 0x0 }; var @"encoding/asn1".x·4 int; ; @"encoding/asn1".x·4 = @"encoding/asn1".i·3 / 0x8; var @"encoding/asn1".y·5 uint; ; @"encoding/asn1".y·5 = 0x7 - uint(@"encoding/asn1".i·3 % 0x8); return int(@"encoding/asn1".b·2.Bytes[@"encoding/asn1".x·4] >> @"encoding/asn1".y·5) & 0x1 } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x2") RightAlign () (? 
[]byte) + type @"crypto/x509/pkix".CertificateList struct { TBSCertList @"crypto/x509/pkix".TBSCertificateList; SignatureAlgorithm @"crypto/x509/pkix".AlgorithmIdentifier; SignatureValue @"encoding/asn1".BitString } + func (@"crypto/x509/pkix".certList·2 *@"crypto/x509/pkix".CertificateList "esc:0x0") HasExpired (@"crypto/x509/pkix".now·3 @"time".Time "esc:0x0") (? bool) + type @"crypto/x509".CertPool struct { @"crypto/x509".bySubjectKeyId map[string][]int; @"crypto/x509".byName map[string][]int; @"crypto/x509".certs []*@"crypto/x509".Certificate } + func (@"crypto/x509".s·1 *@"crypto/x509".CertPool) AddCert (@"crypto/x509".cert·2 *@"crypto/x509".Certificate) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool) AppendCertsFromPEM (@"crypto/x509".pemCerts·3 []byte) (@"crypto/x509".ok·1 bool) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool "esc:0x0") Subjects () (@"crypto/x509".res·1 [][]byte) + func (@"crypto/x509".s·4 *@"crypto/x509".CertPool "esc:0x0") @"crypto/x509".findVerifiedParents (@"crypto/x509".cert·5 *@"crypto/x509".Certificate) (@"crypto/x509".parents·1 []int, @"crypto/x509".errCert·2 *@"crypto/x509".Certificate, @"crypto/x509".err·3 error) + type @"crypto/x509".VerifyOptions struct { DNSName string; Intermediates *@"crypto/x509".CertPool; Roots *@"crypto/x509".CertPool; CurrentTime @"time".Time; KeyUsages []@"crypto/x509".ExtKeyUsage } + type @"crypto/x509".Certificate struct { Raw []byte; RawTBSCertificate []byte; RawSubjectPublicKeyInfo []byte; RawSubject []byte; RawIssuer []byte; Signature []byte; SignatureAlgorithm @"crypto/x509".SignatureAlgorithm; PublicKeyAlgorithm @"crypto/x509".PublicKeyAlgorithm; PublicKey interface {}; Version int; SerialNumber *@"math/big".Int; Issuer @"crypto/x509/pkix".Name; Subject @"crypto/x509/pkix".Name; NotBefore @"time".Time; NotAfter @"time".Time; KeyUsage @"crypto/x509".KeyUsage; Extensions []@"crypto/x509/pkix".Extension; ExtraExtensions []@"crypto/x509/pkix".Extension; ExtKeyUsage 
[]@"crypto/x509".ExtKeyUsage; UnknownExtKeyUsage []@"encoding/asn1".ObjectIdentifier; BasicConstraintsValid bool; IsCA bool; MaxPathLen int; MaxPathLenZero bool; SubjectKeyId []byte; AuthorityKeyId []byte; OCSPServer []string; IssuingCertificateURL []string; DNSNames []string; EmailAddresses []string; IPAddresses []@"net".IP; PermittedDNSDomainsCritical bool; PermittedDNSDomains []string; CRLDistributionPoints []string; PolicyIdentifiers []@"encoding/asn1".ObjectIdentifier } + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckCRLSignature (@"crypto/x509".crl·3 *@"crypto/x509/pkix".CertificateList) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignature (@"crypto/x509".algo·3 @"crypto/x509".SignatureAlgorithm, @"crypto/x509".signed·4 []byte, @"crypto/x509".signature·5 []byte) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignatureFrom (@"crypto/x509".parent·3 *@"crypto/x509".Certificate) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) CreateCRL (@"crypto/x509".rand·4 @"io".Reader, @"crypto/x509".priv·5 interface {}, @"crypto/x509".revokedCerts·6 []@"crypto/x509/pkix".RevokedCertificate, @"crypto/x509".now·7 @"time".Time, @"crypto/x509".expiry·8 @"time".Time) (@"crypto/x509".crlBytes·1 []byte, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x0") Equal (@"crypto/x509".other·3 *@"crypto/x509".Certificate "esc:0x0") (? bool) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) Verify (@"crypto/x509".opts·4 @"crypto/x509".VerifyOptions "esc:0x4") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") VerifyHostname (@"crypto/x509".h·3 string "esc:0x2") (? 
error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) @"crypto/x509".buildChains (@"crypto/x509".cache·4 map[int][][]*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".currentChain·5 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·6 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") @"crypto/x509".isValid (@"crypto/x509".certType·3 int, @"crypto/x509".currentChain·4 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·5 *@"crypto/x509".VerifyOptions "esc:0x0") (? error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate "esc:0x0") @"crypto/x509".systemVerify (@"crypto/x509".opts·4 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) { return nil, nil } + type @"crypto/tls".ConnectionState struct { Version uint16; HandshakeComplete bool; DidResume bool; CipherSuite uint16; NegotiatedProtocol string; NegotiatedProtocolIsMutual bool; ServerName string; PeerCertificates []*@"crypto/x509".Certificate; VerifiedChains [][]*@"crypto/x509".Certificate; TLSUnique []byte } + type @"net/http".Cookie struct { Name string; Value string; Path string; Domain string; Expires @"time".Time; RawExpires string; MaxAge int; Secure bool; HttpOnly bool; Raw string; Unparsed []string } + func (@"net/http".c·2 *@"net/http".Cookie) String () (? string) + type @"bufio".Reader struct { @"bufio".buf []byte; @"bufio".rd @"io".Reader; @"bufio".r int; @"bufio".w int; @"bufio".err error; @"bufio".lastByte int; @"bufio".lastRuneSize int } + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") Buffered () (? int) { return @"bufio".b·2.@"bufio".w - @"bufio".b·2.@"bufio".r } + func (@"bufio".b·3 *@"bufio".Reader) Peek (@"bufio".n·4 int) (? []byte, ? 
error) + func (@"bufio".b·3 *@"bufio".Reader) Read (@"bufio".p·4 []byte) (@"bufio".n·1 int, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadByte () (@"bufio".c·1 byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadBytes (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadLine () (@"bufio".line·1 []byte, @"bufio".isPrefix·2 bool, @"bufio".err·3 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadRune () (@"bufio".r·1 rune, @"bufio".size·2 int, @"bufio".err·3 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadSlice (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadString (@"bufio".delim·4 byte) (@"bufio".line·1 string, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) Reset (@"bufio".r·2 @"io".Reader) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadByte () (? error) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadRune () (? error) { if @"bufio".b·2.@"bufio".lastRuneSize < 0x0 || @"bufio".b·2.@"bufio".r < @"bufio".b·2.@"bufio".lastRuneSize { return @"bufio".ErrInvalidUnreadRune }; @"bufio".b·2.@"bufio".r -= @"bufio".b·2.@"bufio".lastRuneSize; @"bufio".b·2.@"bufio".lastByte = -0x1; @"bufio".b·2.@"bufio".lastRuneSize = -0x1; return nil } + func (@"bufio".b·3 *@"bufio".Reader) WriteTo (@"bufio".w·4 @"io".Writer) (@"bufio".n·1 int64, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) @"bufio".fill () + func (@"bufio".b·2 *@"bufio".Reader "esc:0x1") @"bufio".readErr () (? 
error) { var @"bufio".err·3 error; ; @"bufio".err·3 = @"bufio".b·2.@"bufio".err; @"bufio".b·2.@"bufio".err = nil; return @"bufio".err·3 } + func (@"bufio".b·1 *@"bufio".Reader "esc:0x0") @"bufio".reset (@"bufio".buf·2 []byte, @"bufio".r·3 @"io".Reader) { *@"bufio".b·1 = (@"bufio".Reader{ @"bufio".buf:@"bufio".buf·2, @"bufio".rd:@"bufio".r·3, @"bufio".lastByte:-0x1, @"bufio".lastRuneSize:-0x1 }) } + func (@"bufio".b·3 *@"bufio".Reader) @"bufio".writeBuf (@"bufio".w·4 @"io".Writer) (? int64, ? error) + type @"bytes".readOp int + type @"bytes".Buffer struct { @"bytes".buf []byte; @"bytes".off int; @"bytes".runeBytes [4]byte; @"bytes".bootstrap [64]byte; @"bytes".lastRead @"bytes".readOp } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Bytes () (? []byte) { return @"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:] } + func (@"bytes".b·1 *@"bytes".Buffer) Grow (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") Len () (? int) { return len(@"bytes".b·2.@"bytes".buf) - @"bytes".b·2.@"bytes".off } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Next (@"bytes".n·3 int) (? []byte) + func (@"bytes".b·3 *@"bytes".Buffer) Read (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadByte () (@"bytes".c·1 byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadBytes (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadFrom (@"bytes".r·4 @"io".Reader) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·4 *@"bytes".Buffer) ReadRune () (@"bytes".r·1 rune, @"bytes".size·2 int, @"bytes".err·3 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadString (@"bytes".delim·4 byte) (@"bytes".line·1 string, @"bytes".err·2 error) + func (@"bytes".b·1 *@"bytes".Buffer) Reset () + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") String () (? 
string) { if @"bytes".b·2 == nil { return "" }; return string(@"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:]) } + func (@"bytes".b·1 *@"bytes".Buffer) Truncate (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadByte () (? error) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadRune () (? error) + func (@"bytes".b·3 *@"bytes".Buffer) Write (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) WriteByte (@"bytes".c·3 byte) (? error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteRune (@"bytes".r·4 rune) (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteString (@"bytes".s·4 string "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteTo (@"bytes".w·4 @"io".Writer) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) @"bytes".grow (@"bytes".n·3 int) (? int) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x1") @"bytes".readSlice (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + type @"mime/multipart".Part struct { Header @"net/textproto".MIMEHeader; @"mime/multipart".buffer *@"bytes".Buffer; @"mime/multipart".mr *@"mime/multipart".Reader; @"mime/multipart".bytesRead int; @"mime/multipart".disposition string; @"mime/multipart".dispositionParams map[string]string; @"mime/multipart".r @"io".Reader } + func (@"mime/multipart".p·2 *@"mime/multipart".Part) Close () (? error) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FileName () (? string) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FormName () (? 
string) + func (@"mime/multipart".p·3 *@"mime/multipart".Part) Read (@"mime/multipart".d·4 []byte) (@"mime/multipart".n·1 int, @"mime/multipart".err·2 error) + func (@"mime/multipart".p·1 *@"mime/multipart".Part "esc:0x0") @"mime/multipart".parseContentDisposition () + func (@"mime/multipart".bp·2 *@"mime/multipart".Part) @"mime/multipart".populateHeaders () (? error) + type @"mime/multipart".Reader struct { @"mime/multipart".bufReader *@"bufio".Reader; @"mime/multipart".currentPart *@"mime/multipart".Part; @"mime/multipart".partsRead int; @"mime/multipart".nl []byte; @"mime/multipart".nlDashBoundary []byte; @"mime/multipart".dashBoundaryDash []byte; @"mime/multipart".dashBoundary []byte } + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) NextPart () (? *@"mime/multipart".Part, ? error) + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) ReadForm (@"mime/multipart".maxMemory·4 int64) (@"mime/multipart".f·1 *@"mime/multipart".Form, @"mime/multipart".err·2 error) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader) @"mime/multipart".isBoundaryDelimiterLine (@"mime/multipart".line·3 []byte "esc:0x0") (@"mime/multipart".ret·1 bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".isFinalBoundary (@"mime/multipart".line·3 []byte "esc:0x0") (? bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".peekBufferIsEmptyPart (@"mime/multipart".peek·3 []byte "esc:0x0") (? 
bool) + type @"net/http".Request struct { Method string; URL *@"net/url".URL; Proto string; ProtoMajor int; ProtoMinor int; Header @"net/http".Header; Body @"io".ReadCloser; ContentLength int64; TransferEncoding []string; Close bool; Host string; Form @"net/url".Values; PostForm @"net/url".Values; MultipartForm *@"mime/multipart".Form; Trailer @"net/http".Header; RemoteAddr string; RequestURI string; TLS *@"crypto/tls".ConnectionState } + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") AddCookie (@"net/http".c·2 *@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request "esc:0x0") BasicAuth () (@"net/http".username·1 string, @"net/http".password·2 string, @"net/http".ok·3 bool) + func (@"net/http".r·3 *@"net/http".Request "esc:0x0") Cookie (@"net/http".name·4 string "esc:0x0") (? *@"net/http".Cookie, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Cookies () (? []*@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request) FormFile (@"net/http".key·5 string "esc:0x0") (? @"mime/multipart".File, ? *@"mime/multipart".FileHeader, ? error) + func (@"net/http".r·2 *@"net/http".Request) FormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·3 *@"net/http".Request) MultipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request) ParseForm () (? error) + func (@"net/http".r·2 *@"net/http".Request) ParseMultipartForm (@"net/http".maxMemory·3 int64) (? error) + func (@"net/http".r·2 *@"net/http".Request) PostFormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") ProtoAtLeast (@"net/http".major·3 int, @"net/http".minor·4 int) (? bool) { return @"net/http".r·2.ProtoMajor > @"net/http".major·3 || @"net/http".r·2.ProtoMajor == @"net/http".major·3 && @"net/http".r·2.ProtoMinor >= @"net/http".minor·4 } + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Referer () (? 
string) + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") SetBasicAuth (@"net/http".username·2 string "esc:0x0", @"net/http".password·3 string "esc:0x0") + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") UserAgent () (? string) + func (@"net/http".r·2 *@"net/http".Request) Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·2 *@"net/http".Request) WriteProxy (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·1 *@"net/http".Request) @"net/http".closeBody () + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".expectsContinue () (? bool) + func (@"net/http".r·3 *@"net/http".Request) @"net/http".multipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsClose () (? bool) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsHttp10KeepAlive () (? bool) + func (@"net/http".req·2 *@"net/http".Request) @"net/http".write (@"net/http".w·3 @"io".Writer, @"net/http".usingProxy·4 bool, @"net/http".extraHeaders·5 @"net/http".Header "esc:0x0") (? error) + type @"net/http".Response struct { Status string; StatusCode int; Proto string; ProtoMajor int; ProtoMinor int; Header @"net/http".Header; Body @"io".ReadCloser; ContentLength int64; TransferEncoding []string; Close bool; Trailer @"net/http".Header; Request *@"net/http".Request; TLS *@"crypto/tls".ConnectionState } + func (@"net/http".r·2 *@"net/http".Response "esc:0x0") Cookies () (? []*@"net/http".Cookie) + func (@"net/http".r·3 *@"net/http".Response "esc:0x1") Location () (? *@"net/url".URL, ? error) + func (@"net/http".r·2 *@"net/http".Response "esc:0x0") ProtoAtLeast (@"net/http".major·3 int, @"net/http".minor·4 int) (? bool) { return @"net/http".r·2.ProtoMajor > @"net/http".major·3 || @"net/http".r·2.ProtoMajor == @"net/http".major·3 && @"net/http".r·2.ProtoMinor >= @"net/http".minor·4 } + func (@"net/http".r·2 *@"net/http".Response) Write (@"net/http".w·3 @"io".Writer) (? 
error) + type @"net/http".RoundTripper interface { RoundTrip(? *@"net/http".Request) (? *@"net/http".Response, ? error) } + type @"net/http".CookieJar interface { Cookies(@"net/http".u *@"net/url".URL) (? []*@"net/http".Cookie); SetCookies(@"net/http".u *@"net/url".URL, @"net/http".cookies []*@"net/http".Cookie) } + type @"net/http".Client struct { Transport @"net/http".RoundTripper; CheckRedirect func(@"net/http".req *@"net/http".Request, @"net/http".via []*@"net/http".Request) (? error); Jar @"net/http".CookieJar; Timeout @"time".Duration } + func (@"net/http".c·3 *@"net/http".Client) Do (@"net/http".req·4 *@"net/http".Request) (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) Get (@"net/http".url·4 string) (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) Head (@"net/http".url·4 string) (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) Post (@"net/http".url·4 string, @"net/http".bodyType·5 string, @"net/http".body·6 @"io".Reader) (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) PostForm (@"net/http".url·4 string, @"net/http".data·5 @"net/url".Values "esc:0x0") (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) @"net/http".doFollowingRedirects (@"net/http".ireq·4 *@"net/http".Request, @"net/http".shouldRedirect·5 func(? int) (? bool) "esc:0x0") (@"net/http".resp·1 *@"net/http".Response, @"net/http".err·2 error) + func (@"net/http".c·3 *@"net/http".Client) @"net/http".send (@"net/http".req·4 *@"net/http".Request) (? *@"net/http".Response, ? error) + func (@"net/http".c·2 *@"net/http".Client "esc:0x1") @"net/http".transport () (? 
@"net/http".RoundTripper) { if @"net/http".c·2.Transport != nil { return @"net/http".c·2.Transport }; return @"net/http".DefaultTransport } + import crypto "crypto" // indirect + type @"crypto".PrivateKey interface {} + type @"crypto/tls".Certificate struct { Certificate [][]byte; PrivateKey @"crypto".PrivateKey; OCSPStaple []byte; Leaf *@"crypto/x509".Certificate } + type @"crypto/tls".CurveID uint16 + type @"crypto/tls".ClientHelloInfo struct { CipherSuites []uint16; ServerName string; SupportedCurves []@"crypto/tls".CurveID; SupportedPoints []uint8 } + type @"crypto/tls".ClientAuthType int + type @"crypto/tls".ClientSessionState struct { @"crypto/tls".sessionTicket []uint8; @"crypto/tls".vers uint16; @"crypto/tls".cipherSuite uint16; @"crypto/tls".masterSecret []byte; @"crypto/tls".serverCertificates []*@"crypto/x509".Certificate } + type @"crypto/tls".ClientSessionCache interface { Get(@"crypto/tls".sessionKey string) (@"crypto/tls".session *@"crypto/tls".ClientSessionState, @"crypto/tls".ok bool); Put(@"crypto/tls".sessionKey string, @"crypto/tls".cs *@"crypto/tls".ClientSessionState) } + type @"sync".Mutex struct { @"sync".state int32; @"sync".sema uint32 } + func (@"sync".m·1 *@"sync".Mutex) Lock () + func (@"sync".m·1 *@"sync".Mutex) Unlock () + type @"sync".Once struct { @"sync".m @"sync".Mutex; @"sync".done uint32 } + func (@"sync".o·1 *@"sync".Once) Do (@"sync".f·2 func() "esc:0x0") + type @"crypto/tls".Config struct { Rand @"io".Reader; Time func() (? @"time".Time); Certificates []@"crypto/tls".Certificate; NameToCertificate map[string]*@"crypto/tls".Certificate; GetCertificate func(@"crypto/tls".clientHello *@"crypto/tls".ClientHelloInfo) (? *@"crypto/tls".Certificate, ? 
error); RootCAs *@"crypto/x509".CertPool; NextProtos []string; ServerName string; ClientAuth @"crypto/tls".ClientAuthType; ClientCAs *@"crypto/x509".CertPool; InsecureSkipVerify bool; CipherSuites []uint16; PreferServerCipherSuites bool; SessionTicketsDisabled bool; SessionTicketKey [32]byte; ClientSessionCache @"crypto/tls".ClientSessionCache; MinVersion uint16; MaxVersion uint16; CurvePreferences []@"crypto/tls".CurveID; @"crypto/tls".serverInitOnce @"sync".Once } + func (@"crypto/tls".c·1 *@"crypto/tls".Config) BuildNameToCertificate () + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x1") @"crypto/tls".cipherSuites () (? []uint16) + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x1") @"crypto/tls".curvePreferences () (? []@"crypto/tls".CurveID) { if @"crypto/tls".c·2 == nil || len(@"crypto/tls".c·2.CurvePreferences) == 0x0 { return @"crypto/tls".defaultCurvePreferences }; return @"crypto/tls".c·2.CurvePreferences } + func (@"crypto/tls".c·3 *@"crypto/tls".Config "esc:0x1") @"crypto/tls".getCertificate (@"crypto/tls".clientHello·4 *@"crypto/tls".ClientHelloInfo) (? *@"crypto/tls".Certificate, ? error) + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x0") @"crypto/tls".maxVersion () (? uint16) { if @"crypto/tls".c·2 == nil || @"crypto/tls".c·2.MaxVersion == 0x0 { return 0x303 }; return @"crypto/tls".c·2.MaxVersion } + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x0") @"crypto/tls".minVersion () (? uint16) { if @"crypto/tls".c·2 == nil || @"crypto/tls".c·2.MinVersion == 0x0 { return 0x300 }; return @"crypto/tls".c·2.MinVersion } + func (@"crypto/tls".c·3 *@"crypto/tls".Config "esc:0x0") @"crypto/tls".mutualVersion (@"crypto/tls".vers·4 uint16) (? uint16, ? bool) + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x1") @"crypto/tls".rand () (? 
@"io".Reader) { var @"crypto/tls".r·3 @"io".Reader; ; @"crypto/tls".r·3 = @"crypto/tls".c·2.Rand; if @"crypto/tls".r·3 == nil { return @"crypto/rand".Reader }; return @"crypto/tls".r·3 } + func (@"crypto/tls".c·1 *@"crypto/tls".Config) @"crypto/tls".serverInit () + func (@"crypto/tls".c·2 *@"crypto/tls".Config "esc:0x0") @"crypto/tls".time () (? @"time".Time) + type @"sync".Locker interface { Lock(); Unlock() } + type @"sync".RWMutex struct { @"sync".w @"sync".Mutex; @"sync".writerSem uint32; @"sync".readerSem uint32; @"sync".readerCount int32; @"sync".readerWait int32 } + func (@"sync".rw·1 *@"sync".RWMutex) Lock () + func (@"sync".rw·1 *@"sync".RWMutex) RLock () + func (@"sync".rw·2 *@"sync".RWMutex "esc:0x2") RLocker () (? @"sync".Locker) { return (*@"sync".rlocker)(@"sync".rw·2) } + func (@"sync".rw·1 *@"sync".RWMutex) RUnlock () + func (@"sync".rw·1 *@"sync".RWMutex) Unlock () + type @"sync".WaitGroup struct { @"sync".m @"sync".Mutex; @"sync".counter int32; @"sync".waiters int32; @"sync".sema *uint32 } + func (@"sync".wg·1 *@"sync".WaitGroup) Add (@"sync".delta·2 int) + func (@"sync".wg·1 *@"sync".WaitGroup) Done () + func (@"sync".wg·1 *@"sync".WaitGroup) Wait () + type @"".APIEvents struct { Status string "json:\"Status,omitempty\" yaml:\"Status,omitempty\""; ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; From string "json:\"From,omitempty\" yaml:\"From,omitempty\""; Time int64 "json:\"Time,omitempty\" yaml:\"Time,omitempty\"" } + type @"".eventMonitoringState struct { ? @"sync".RWMutex; ? @"sync".WaitGroup; @"".enabled bool; @"".lastSeen *int64; C chan *@"".APIEvents; @"".errC chan error; @"".listeners []chan<- *@"".APIEvents } + func (@"".eventState·2 *@"".eventMonitoringState) @"".addListener (@"".listener·3 chan<- *@"".APIEvents) (? error) + func (@"".eventState·1 *@"".eventMonitoringState) @"".closeListeners () + func (@"".eventState·2 *@"".eventMonitoringState) @"".connectWithRetry (@"".c·3 *@"".Client) (? 
error) + func (@"".eventState·2 *@"".eventMonitoringState) @"".disableEventMonitoring () (? error) + func (@"".eventState·2 *@"".eventMonitoringState) @"".enableEventMonitoring (@"".c·3 *@"".Client) (? error) + func (@"".eventState·2 *@"".eventMonitoringState) @"".isEnabled () (? bool) + func (@"".eventState·1 *@"".eventMonitoringState) @"".monitorEvents (@"".c·2 *@"".Client) + func (@"".eventState·2 *@"".eventMonitoringState) @"".noListeners () (? bool) + func (@"".eventState·2 *@"".eventMonitoringState) @"".removeListener (@"".listener·3 chan<- *@"".APIEvents "esc:0x0") (? error) + func (@"".eventState·1 *@"".eventMonitoringState) @"".sendEvent (@"".event·2 *@"".APIEvents) + func (@"".eventState·1 *@"".eventMonitoringState) @"".updateLastSeen (@"".e·2 *@"".APIEvents "esc:0x0") + type @"".doOptions struct { @"".data interface {}; @"".forceJSON bool } + type @"".streamOptions struct { @"".setRawTerminal bool; @"".rawJSONStream bool; @"".useJSONDecoder bool; @"".headers map[string]string; @"".in @"io".Reader; @"".stdout @"io".Writer; @"".stderr @"io".Writer; @"".timeout @"time".Duration } + type @"".hijackOptions struct { @"".success chan struct {}; @"".setRawTerminal bool; @"".in @"io".Reader; @"".stdout @"io".Writer; @"".stderr @"io".Writer; @"".data interface {} } + type @"".APIPort struct { PrivatePort int64 "json:\"PrivatePort,omitempty\" yaml:\"PrivatePort,omitempty\""; PublicPort int64 "json:\"PublicPort,omitempty\" yaml:\"PublicPort,omitempty\""; Type string "json:\"Type,omitempty\" yaml:\"Type,omitempty\""; IP string "json:\"IP,omitempty\" yaml:\"IP,omitempty\"" } + type @"".APIContainers struct { ID string "json:\"Id\" yaml:\"Id\""; Image string "json:\"Image,omitempty\" yaml:\"Image,omitempty\""; Command string "json:\"Command,omitempty\" yaml:\"Command,omitempty\""; Created int64 "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Status string "json:\"Status,omitempty\" yaml:\"Status,omitempty\""; Ports []@"".APIPort "json:\"Ports,omitempty\" 
yaml:\"Ports,omitempty\""; SizeRw int64 "json:\"SizeRw,omitempty\" yaml:\"SizeRw,omitempty\""; SizeRootFs int64 "json:\"SizeRootFs,omitempty\" yaml:\"SizeRootFs,omitempty\""; Names []string "json:\"Names,omitempty\" yaml:\"Names,omitempty\"" } + type @"".ListContainersOptions struct { All bool; Size bool; Limit int; Since string; Before string; Filters map[string][]string } + type @"".RenameContainerOptions struct { ID string "qs:\"-\""; Name string "json:\"name,omitempty\" yaml:\"name,omitempty\"" } + type @"".Port string + func (@"".p·2 @"".Port "esc:0x0") Port () (? string) + func (@"".p·2 @"".Port "esc:0x0") Proto () (? string) + type @"".Config struct { Hostname string "json:\"Hostname,omitempty\" yaml:\"Hostname,omitempty\""; Domainname string "json:\"Domainname,omitempty\" yaml:\"Domainname,omitempty\""; User string "json:\"User,omitempty\" yaml:\"User,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; MemorySwap int64 "json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""; CPUShares int64 "json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""; CPUSet string "json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""; AttachStdin bool "json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""; AttachStdout bool "json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""; AttachStderr bool "json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""; PortSpecs []string "json:\"PortSpecs,omitempty\" yaml:\"PortSpecs,omitempty\""; ExposedPorts map[@"".Port]struct {} "json:\"ExposedPorts,omitempty\" yaml:\"ExposedPorts,omitempty\""; Tty bool "json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""; OpenStdin bool "json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""; StdinOnce bool "json:\"StdinOnce,omitempty\" yaml:\"StdinOnce,omitempty\""; Env []string "json:\"Env,omitempty\" yaml:\"Env,omitempty\""; Cmd []string "json:\"Cmd\" yaml:\"Cmd\""; DNS []string "json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""; Image 
string "json:\"Image,omitempty\" yaml:\"Image,omitempty\""; Volumes map[string]struct {} "json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""; VolumesFrom string "json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""; WorkingDir string "json:\"WorkingDir,omitempty\" yaml:\"WorkingDir,omitempty\""; MacAddress string "json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""; Entrypoint []string "json:\"Entrypoint\" yaml:\"Entrypoint\""; NetworkDisabled bool "json:\"NetworkDisabled,omitempty\" yaml:\"NetworkDisabled,omitempty\""; SecurityOpts []string "json:\"SecurityOpts,omitempty\" yaml:\"SecurityOpts,omitempty\""; OnBuild []string "json:\"OnBuild,omitempty\" yaml:\"OnBuild,omitempty\""; Labels map[string]string "json:\"Labels,omitempty\" yaml:\"Labels,omitempty\"" } + type @"".State struct { Running bool "json:\"Running,omitempty\" yaml:\"Running,omitempty\""; Paused bool "json:\"Paused,omitempty\" yaml:\"Paused,omitempty\""; Restarting bool "json:\"Restarting,omitempty\" yaml:\"Restarting,omitempty\""; OOMKilled bool "json:\"OOMKilled,omitempty\" yaml:\"OOMKilled,omitempty\""; Pid int "json:\"Pid,omitempty\" yaml:\"Pid,omitempty\""; ExitCode int "json:\"ExitCode,omitempty\" yaml:\"ExitCode,omitempty\""; Error string "json:\"Error,omitempty\" yaml:\"Error,omitempty\""; StartedAt @"time".Time "json:\"StartedAt,omitempty\" yaml:\"StartedAt,omitempty\""; FinishedAt @"time".Time "json:\"FinishedAt,omitempty\" yaml:\"FinishedAt,omitempty\"" } + func (@"".s·2 *@"".State "esc:0x0") String () (? 
string) + type @"".SwarmNode struct { ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; IP string "json:\"IP,omitempty\" yaml:\"IP,omitempty\""; Addr string "json:\"Addr,omitempty\" yaml:\"Addr,omitempty\""; Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; CPUs int64 "json:\"CPUs,omitempty\" yaml:\"CPUs,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; Labels map[string]string "json:\"Labels,omitempty\" yaml:\"Labels,omitempty\"" } + type @"".PortMapping map[string]string + type @"".PortBinding struct { HostIP string "json:\"HostIP,omitempty\" yaml:\"HostIP,omitempty\""; HostPort string "json:\"HostPort,omitempty\" yaml:\"HostPort,omitempty\"" } + type @"".NetworkSettings struct { IPAddress string "json:\"IPAddress,omitempty\" yaml:\"IPAddress,omitempty\""; IPPrefixLen int "json:\"IPPrefixLen,omitempty\" yaml:\"IPPrefixLen,omitempty\""; MacAddress string "json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""; Gateway string "json:\"Gateway,omitempty\" yaml:\"Gateway,omitempty\""; Bridge string "json:\"Bridge,omitempty\" yaml:\"Bridge,omitempty\""; PortMapping map[string]@"".PortMapping "json:\"PortMapping,omitempty\" yaml:\"PortMapping,omitempty\""; Ports map[@"".Port][]@"".PortBinding "json:\"Ports,omitempty\" yaml:\"Ports,omitempty\""; NetworkID string "json:\"NetworkID,omitempty\" yaml:\"NetworkID,omitempty\""; EndpointID string "json:\"EndpointID,omitempty\" yaml:\"EndpointID,omitempty\""; SandboxKey string "json:\"SandboxKey,omitempty\" yaml:\"SandboxKey,omitempty\""; GlobalIPv6Address string "json:\"GlobalIPv6Address,omitempty\" yaml:\"GlobalIPv6Address,omitempty\""; GlobalIPv6PrefixLen int "json:\"GlobalIPv6PrefixLen,omitempty\" yaml:\"GlobalIPv6PrefixLen,omitempty\""; IPv6Gateway string "json:\"IPv6Gateway,omitempty\" yaml:\"IPv6Gateway,omitempty\""; LinkLocalIPv6Address string "json:\"LinkLocalIPv6Address,omitempty\" yaml:\"LinkLocalIPv6Address,omitempty\""; LinkLocalIPv6PrefixLen int 
"json:\"LinkLocalIPv6PrefixLen,omitempty\" yaml:\"LinkLocalIPv6PrefixLen,omitempty\""; SecondaryIPAddresses []string "json:\"SecondaryIPAddresses,omitempty\" yaml:\"SecondaryIPAddresses,omitempty\""; SecondaryIPv6Addresses []string "json:\"SecondaryIPv6Addresses,omitempty\" yaml:\"SecondaryIPv6Addresses,omitempty\"" } + func (@"".settings·2 *@"".NetworkSettings "esc:0x0") PortMappingAPI () (? []@"".APIPort) + type @"".KeyValuePair struct { Key string "json:\"Key,omitempty\" yaml:\"Key,omitempty\""; Value string "json:\"Value,omitempty\" yaml:\"Value,omitempty\"" } + type @"".RestartPolicy struct { Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; MaximumRetryCount int "json:\"MaximumRetryCount,omitempty\" yaml:\"MaximumRetryCount,omitempty\"" } + type @"".Device struct { PathOnHost string "json:\"PathOnHost,omitempty\" yaml:\"PathOnHost,omitempty\""; PathInContainer string "json:\"PathInContainer,omitempty\" yaml:\"PathInContainer,omitempty\""; CgroupPermissions string "json:\"CgroupPermissions,omitempty\" yaml:\"CgroupPermissions,omitempty\"" } + type @"".LogConfig struct { Type string "json:\"Type,omitempty\" yaml:\"Type,omitempty\""; Config map[string]string "json:\"Config,omitempty\" yaml:\"Config,omitempty\"" } + type @"".ULimit struct { Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; Soft int64 "json:\"Soft,omitempty\" yaml:\"Soft,omitempty\""; Hard int64 "json:\"Hard,omitempty\" yaml:\"Hard,omitempty\"" } + type @"".HostConfig struct { Binds []string "json:\"Binds,omitempty\" yaml:\"Binds,omitempty\""; CapAdd []string "json:\"CapAdd,omitempty\" yaml:\"CapAdd,omitempty\""; CapDrop []string "json:\"CapDrop,omitempty\" yaml:\"CapDrop,omitempty\""; ContainerIDFile string "json:\"ContainerIDFile,omitempty\" yaml:\"ContainerIDFile,omitempty\""; LxcConf []@"".KeyValuePair "json:\"LxcConf,omitempty\" yaml:\"LxcConf,omitempty\""; Privileged bool "json:\"Privileged,omitempty\" yaml:\"Privileged,omitempty\""; PortBindings 
map[@"".Port][]@"".PortBinding "json:\"PortBindings,omitempty\" yaml:\"PortBindings,omitempty\""; Links []string "json:\"Links,omitempty\" yaml:\"Links,omitempty\""; PublishAllPorts bool "json:\"PublishAllPorts,omitempty\" yaml:\"PublishAllPorts,omitempty\""; DNS []string "json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""; DNSSearch []string "json:\"DnsSearch,omitempty\" yaml:\"DnsSearch,omitempty\""; ExtraHosts []string "json:\"ExtraHosts,omitempty\" yaml:\"ExtraHosts,omitempty\""; VolumesFrom []string "json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""; NetworkMode string "json:\"NetworkMode,omitempty\" yaml:\"NetworkMode,omitempty\""; IpcMode string "json:\"IpcMode,omitempty\" yaml:\"IpcMode,omitempty\""; PidMode string "json:\"PidMode,omitempty\" yaml:\"PidMode,omitempty\""; UTSMode string "json:\"UTSMode,omitempty\" yaml:\"UTSMode,omitempty\""; RestartPolicy @"".RestartPolicy "json:\"RestartPolicy,omitempty\" yaml:\"RestartPolicy,omitempty\""; Devices []@"".Device "json:\"Devices,omitempty\" yaml:\"Devices,omitempty\""; LogConfig @"".LogConfig "json:\"LogConfig,omitempty\" yaml:\"LogConfig,omitempty\""; ReadonlyRootfs bool "json:\"ReadonlyRootfs,omitempty\" yaml:\"ReadonlyRootfs,omitempty\""; SecurityOpt []string "json:\"SecurityOpt,omitempty\" yaml:\"SecurityOpt,omitempty\""; CgroupParent string "json:\"CgroupParent,omitempty\" yaml:\"CgroupParent,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; MemorySwap int64 "json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""; CPUShares int64 "json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""; CPUSet string "json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""; CPUQuota int64 "json:\"CpuQuota,omitempty\" yaml:\"CpuQuota,omitempty\""; CPUPeriod int64 "json:\"CpuPeriod,omitempty\" yaml:\"CpuPeriod,omitempty\""; Ulimits []@"".ULimit "json:\"Ulimits,omitempty\" yaml:\"Ulimits,omitempty\"" } + type @"".Container struct { ID string "json:\"Id\" yaml:\"Id\""; Created 
@"time".Time "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Path string "json:\"Path,omitempty\" yaml:\"Path,omitempty\""; Args []string "json:\"Args,omitempty\" yaml:\"Args,omitempty\""; Config *@"".Config "json:\"Config,omitempty\" yaml:\"Config,omitempty\""; State @"".State "json:\"State,omitempty\" yaml:\"State,omitempty\""; Image string "json:\"Image,omitempty\" yaml:\"Image,omitempty\""; Node *@"".SwarmNode "json:\"Node,omitempty\" yaml:\"Node,omitempty\""; NetworkSettings *@"".NetworkSettings "json:\"NetworkSettings,omitempty\" yaml:\"NetworkSettings,omitempty\""; SysInitPath string "json:\"SysInitPath,omitempty\" yaml:\"SysInitPath,omitempty\""; ResolvConfPath string "json:\"ResolvConfPath,omitempty\" yaml:\"ResolvConfPath,omitempty\""; HostnamePath string "json:\"HostnamePath,omitempty\" yaml:\"HostnamePath,omitempty\""; HostsPath string "json:\"HostsPath,omitempty\" yaml:\"HostsPath,omitempty\""; LogPath string "json:\"LogPath,omitempty\" yaml:\"LogPath,omitempty\""; Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; Driver string "json:\"Driver,omitempty\" yaml:\"Driver,omitempty\""; Volumes map[string]string "json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""; VolumesRW map[string]bool "json:\"VolumesRW,omitempty\" yaml:\"VolumesRW,omitempty\""; HostConfig *@"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""; ExecIDs []string "json:\"ExecIDs,omitempty\" yaml:\"ExecIDs,omitempty\""; RestartCount int "json:\"RestartCount,omitempty\" yaml:\"RestartCount,omitempty\""; AppArmorProfile string "json:\"AppArmorProfile,omitempty\" yaml:\"AppArmorProfile,omitempty\"" } + type @"".CreateContainerOptions struct { Name string; Config *@"".Config "qs:\"-\""; HostConfig *@"".HostConfig "qs:\"-\"" } + type @"".TopResult struct { Titles []string; Processes [][]string } + type @"".BlkioStatsEntry struct { Major uint64 "json:\"major,omitempty\" yaml:\"major,omitempty\""; Minor uint64 "json:\"minor,omitempty\" 
yaml:\"minor,omitempty\""; Op string "json:\"op,omitempty\" yaml:\"op,omitempty\""; Value uint64 "json:\"value,omitempty\" yaml:\"value,omitempty\"" } + type @"".CPUStats struct { CPUUsage struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" } "json:\"cpu_usage,omitempty\" yaml:\"cpu_usage,omitempty\""; SystemCPUUsage uint64 "json:\"system_cpu_usage,omitempty\" yaml:\"system_cpu_usage,omitempty\""; ThrottlingData struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" } "json:\"throttling_data,omitempty\" yaml:\"throttling_data,omitempty\"" } + type @"".Stats struct { Read @"time".Time "json:\"read,omitempty\" yaml:\"read,omitempty\""; Network struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" } "json:\"network,omitempty\" yaml:\"network,omitempty\""; MemoryStats struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile 
uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" 
yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" } "json:\"memory_stats,omitempty\" yaml:\"memory_stats,omitempty\""; BlkioStats struct { IOServiceBytesRecursive []@"".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []@"".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []@"".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []@"".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []@"".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []@"".BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []@"".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []@"".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" } "json:\"blkio_stats,omitempty\" yaml:\"blkio_stats,omitempty\""; CPUStats @"".CPUStats "json:\"cpu_stats,omitempty\" yaml:\"cpu_stats,omitempty\""; PreCPUStats @"".CPUStats 
"json:\"precpu_stats,omitempty\"" } + type @"".StatsOptions struct { ID string; Stats chan<- *@"".Stats; Stream bool; Done <-chan bool; Timeout @"time".Duration } + type @"".Signal int + type @"".KillContainerOptions struct { ID string "qs:\"-\""; Signal @"".Signal } + type @"".RemoveContainerOptions struct { ID string "qs:\"-\""; RemoveVolumes bool "qs:\"v\""; Force bool } + type @"".CopyFromContainerOptions struct { OutputStream @"io".Writer "json:\"-\""; Container string "json:\"-\""; Resource string } + type @"".Image struct { ID string "json:\"Id\" yaml:\"Id\""; Parent string "json:\"Parent,omitempty\" yaml:\"Parent,omitempty\""; Comment string "json:\"Comment,omitempty\" yaml:\"Comment,omitempty\""; Created @"time".Time "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Container string "json:\"Container,omitempty\" yaml:\"Container,omitempty\""; ContainerConfig @"".Config "json:\"ContainerConfig,omitempty\" yaml:\"ContainerConfig,omitempty\""; DockerVersion string "json:\"DockerVersion,omitempty\" yaml:\"DockerVersion,omitempty\""; Author string "json:\"Author,omitempty\" yaml:\"Author,omitempty\""; Config *@"".Config "json:\"Config,omitempty\" yaml:\"Config,omitempty\""; Architecture string "json:\"Architecture,omitempty\" yaml:\"Architecture,omitempty\""; Size int64 "json:\"Size,omitempty\" yaml:\"Size,omitempty\""; VirtualSize int64 "json:\"VirtualSize,omitempty\" yaml:\"VirtualSize,omitempty\"" } + type @"".CommitContainerOptions struct { Container string; Repository string "qs:\"repo\""; Tag string; Message string "qs:\"m\""; Author string; Run *@"".Config "qs:\"-\"" } + type @"".AttachToContainerOptions struct { Container string "qs:\"-\""; InputStream @"io".Reader "qs:\"-\""; OutputStream @"io".Writer "qs:\"-\""; ErrorStream @"io".Writer "qs:\"-\""; Logs bool; Stream bool; Stdin bool; Stdout bool; Stderr bool; Success chan struct {}; RawTerminal bool "qs:\"-\"" } + type @"".LogsOptions struct { Container string "qs:\"-\""; OutputStream 
@"io".Writer "qs:\"-\""; ErrorStream @"io".Writer "qs:\"-\""; Follow bool; Stdout bool; Stderr bool; Since int64; Timestamps bool; Tail string; RawTerminal bool "qs:\"-\"" } + type @"".ExportContainerOptions struct { ID string; OutputStream @"io".Writer } + type @"".Exec struct { ID string "json:\"Id,omitempty\" yaml:\"Id,omitempty\"" } + type @"".CreateExecOptions struct { AttachStdin bool "json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""; AttachStdout bool "json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""; AttachStderr bool "json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""; Tty bool "json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""; Cmd []string "json:\"Cmd,omitempty\" yaml:\"Cmd,omitempty\""; Container string "json:\"Container,omitempty\" yaml:\"Container,omitempty\""; User string "json:\"User,omitempty\" yaml:\"User,omitempty\"" } + type @"".StartExecOptions struct { Detach bool "json:\"Detach,omitempty\" yaml:\"Detach,omitempty\""; Tty bool "json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""; InputStream @"io".Reader "qs:\"-\""; OutputStream @"io".Writer "qs:\"-\""; ErrorStream @"io".Writer "qs:\"-\""; RawTerminal bool "qs:\"-\""; Success chan struct {} "json:\"-\"" } + type @"".ExecProcessConfig struct { Privileged bool "json:\"privileged,omitempty\" yaml:\"privileged,omitempty\""; User string "json:\"user,omitempty\" yaml:\"user,omitempty\""; Tty bool "json:\"tty,omitempty\" yaml:\"tty,omitempty\""; EntryPoint string "json:\"entrypoint,omitempty\" yaml:\"entrypoint,omitempty\""; Arguments []string "json:\"arguments,omitempty\" yaml:\"arguments,omitempty\"" } + type @"".ExecInspect struct { ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; Running bool "json:\"Running,omitempty\" yaml:\"Running,omitempty\""; ExitCode int "json:\"ExitCode,omitempty\" yaml:\"ExitCode,omitempty\""; OpenStdin bool "json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""; OpenStderr bool "json:\"OpenStderr,omitempty\" 
yaml:\"OpenStderr,omitempty\""; OpenStdout bool "json:\"OpenStdout,omitempty\" yaml:\"OpenStdout,omitempty\""; ProcessConfig @"".ExecProcessConfig "json:\"ProcessConfig,omitempty\" yaml:\"ProcessConfig,omitempty\""; Container @"".Container "json:\"Container,omitempty\" yaml:\"Container,omitempty\"" } + type @"".APIImages struct { ID string "json:\"Id\" yaml:\"Id\""; RepoTags []string "json:\"RepoTags,omitempty\" yaml:\"RepoTags,omitempty\""; Created int64 "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Size int64 "json:\"Size,omitempty\" yaml:\"Size,omitempty\""; VirtualSize int64 "json:\"VirtualSize,omitempty\" yaml:\"VirtualSize,omitempty\""; ParentID string "json:\"ParentId,omitempty\" yaml:\"ParentId,omitempty\""; RepoDigests []string "json:\"RepoDigests,omitempty\" yaml:\"RepoDigests,omitempty\""; Labels map[string]string "json:\"Labels,omitempty\" yaml:\"Labels,omitempty\"" } + type @"".ListImagesOptions struct { All bool; Filters map[string][]string; Digests bool } + type @"".ImageHistory struct { ID string "json:\"Id\" yaml:\"Id\""; Tags []string "json:\"Tags,omitempty\" yaml:\"Tags,omitempty\""; Created int64 "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; CreatedBy string "json:\"CreatedBy,omitempty\" yaml:\"CreatedBy,omitempty\""; Size int64 "json:\"Size,omitempty\" yaml:\"Size,omitempty\"" } + type @"".RemoveImageOptions struct { Force bool "qs:\"force\""; NoPrune bool "qs:\"noprune\"" } + type @"".PushImageOptions struct { Name string; Tag string; Registry string; OutputStream @"io".Writer "qs:\"-\""; RawJSONStream bool "qs:\"-\"" } + type @"".PullImageOptions struct { Repository string "qs:\"fromImage\""; Registry string; Tag string; OutputStream @"io".Writer "qs:\"-\""; RawJSONStream bool "qs:\"-\"" } + type @"".LoadImageOptions struct { InputStream @"io".Reader } + type @"".ExportImageOptions struct { Name string; OutputStream @"io".Writer } + type @"".ExportImagesOptions struct { Names []string; OutputStream @"io".Writer 
"qs:\"-\"" } + type @"".ImportImageOptions struct { Repository string "qs:\"repo\""; Source string "qs:\"fromSrc\""; Tag string "qs:\"tag\""; InputStream @"io".Reader "qs:\"-\""; OutputStream @"io".Writer "qs:\"-\""; RawJSONStream bool "qs:\"-\"" } + type @"".BuildImageOptions struct { Name string "qs:\"t\""; Dockerfile string "qs:\"dockerfile\""; NoCache bool "qs:\"nocache\""; SuppressOutput bool "qs:\"q\""; Pull bool "qs:\"pull\""; RmTmpContainer bool "qs:\"rm\""; ForceRmTmpContainer bool "qs:\"forcerm\""; Memory int64 "qs:\"memory\""; Memswap int64 "qs:\"memswap\""; CPUShares int64 "qs:\"cpushares\""; CPUSetCPUs string "qs:\"cpusetcpus\""; InputStream @"io".Reader "qs:\"-\""; OutputStream @"io".Writer "qs:\"-\""; RawJSONStream bool "qs:\"-\""; Remote string "qs:\"remote\""; Auth @"".AuthConfiguration "qs:\"-\""; AuthConfigs @"".AuthConfigurations "qs:\"-\""; ContextDir string "qs:\"-\"" } + type @"".TagImageOptions struct { Repo string; Tag string; Force bool } + type @"".APIImageSearch struct { Description string "json:\"description,omitempty\" yaml:\"description,omitempty\""; IsOfficial bool "json:\"is_official,omitempty\" yaml:\"is_official,omitempty\""; IsAutomated bool "json:\"is_automated,omitempty\" yaml:\"is_automated,omitempty\""; Name string "json:\"name,omitempty\" yaml:\"name,omitempty\""; StarCount int "json:\"star_count,omitempty\" yaml:\"star_count,omitempty\"" } + type @"".Env []string + func (@"".env·2 *@"".Env) Decode (@"".src·3 @"io".Reader) (? error) + func (@"".env·2 *@"".Env "esc:0x0") Exists (@"".key·3 string "esc:0x0") (? bool) + func (@"".env·2 *@"".Env "esc:0x0") Get (@"".key·3 string "esc:0x0") (@"".value·1 string) + func (@"".env·2 *@"".Env "esc:0x0") GetBool (@"".key·3 string "esc:0x0") (@"".value·1 bool) + func (@"".env·2 *@"".Env "esc:0x0") GetInt (@"".key·3 string "esc:0x0") (? int) + func (@"".env·2 *@"".Env "esc:0x0") GetInt64 (@"".key·3 string "esc:0x0") (? 
int64) + func (@"".env·2 *@"".Env "esc:0x0") GetJSON (@"".key·3 string "esc:0x0", @"".iface·4 interface {}) (? error) + func (@"".env·2 *@"".Env "esc:0x0") GetList (@"".key·3 string "esc:0x0") (? []string) + func (@"".env·2 *@"".Env "esc:0x0") Map () (? map[string]string) + func (@"".env·1 *@"".Env) Set (@"".key·2 string "esc:0x0", @"".value·3 string "esc:0x0") { *@"".env·1 = append(*@"".env·1, @"".key·2 + "=" + @"".value·3) } + func (@"".env·1 *@"".Env) SetAuto (@"".key·2 string "esc:0x0", @"".value·3 interface {}) + func (@"".env·1 *@"".Env) SetBool (@"".key·2 string "esc:0x0", @"".value·3 bool) + func (@"".env·1 *@"".Env) SetInt (@"".key·2 string "esc:0x0", @"".value·3 int) + func (@"".env·1 *@"".Env) SetInt64 (@"".key·2 string "esc:0x0", @"".value·3 int64) + func (@"".env·2 *@"".Env) SetJSON (@"".key·3 string "esc:0x0", @"".value·4 interface {}) (? error) + func (@"".env·2 *@"".Env) SetList (@"".key·3 string "esc:0x0", @"".value·4 []string) (? error) + type @"".Endpoint struct { Name string "json:\"name\""; ID string "json:\"id\""; Network string "json:\"network\"" } + type @"".Network struct { Name string "json:\"name\""; ID string "json:\"id\""; Type string "json:\"type\""; Endpoints []*@"".Endpoint "json:\"endpoints\"" } + type @"".CreateNetworkOptions struct { Name string "json:\"name\""; NetworkType string "json:\"network_type\""; Options map[string]interface {} "json:\"options\"" } + type @"".Client struct { SkipServerVersionCheck bool; HTTPClient *@"net/http".Client; TLSConfig *@"crypto/tls".Config; @"".endpoint string; @"".endpointURL *@"net/url".URL; @"".eventMonitor *@"".eventMonitoringState; @"".requestedAPIVersion @"".APIVersion; @"".serverAPIVersion @"".APIVersion; @"".expectedAPIVersion @"".APIVersion } + func (@"".c·2 *@"".Client) AddEventListener (@"".listener·3 chan<- *@"".APIEvents) (? error) + func (@"".c·2 *@"".Client) AttachToContainer (@"".opts·3 @"".AttachToContainerOptions) (? 
error) + func (@"".c·2 *@"".Client) AuthCheck (@"".conf·3 *@"".AuthConfiguration) (? error) + func (@"".c·2 *@"".Client) BuildImage (@"".opts·3 @"".BuildImageOptions) (? error) + func (@"".c·3 *@"".Client) CommitContainer (@"".opts·4 @"".CommitContainerOptions) (? *@"".Image, ? error) + func (@"".c·3 *@"".Client) ContainerChanges (@"".id·4 string) (? []@"".Change, ? error) + func (@"".c·2 *@"".Client) CopyFromContainer (@"".opts·3 @"".CopyFromContainerOptions) (? error) + func (@"".c·3 *@"".Client) CreateContainer (@"".opts·4 @"".CreateContainerOptions) (? *@"".Container, ? error) + func (@"".c·3 *@"".Client) CreateExec (@"".opts·4 @"".CreateExecOptions) (? *@"".Exec, ? error) + func (@"".c·3 *@"".Client) CreateNetwork (@"".opts·4 @"".CreateNetworkOptions) (? *@"".Network, ? error) + func (@"".c·2 *@"".Client) ExportContainer (@"".opts·3 @"".ExportContainerOptions) (? error) + func (@"".c·2 *@"".Client) ExportImage (@"".opts·3 @"".ExportImageOptions) (? error) + func (@"".c·2 *@"".Client) ExportImages (@"".opts·3 @"".ExportImagesOptions) (? error) + func (@"".c·3 *@"".Client) ImageHistory (@"".name·4 string "esc:0x0") (? []@"".ImageHistory, ? error) + func (@"".c·2 *@"".Client) ImportImage (@"".opts·3 @"".ImportImageOptions) (? error) + func (@"".c·3 *@"".Client) Info () (? *@"".Env, ? error) + func (@"".c·3 *@"".Client) InspectContainer (@"".id·4 string) (? *@"".Container, ? error) + func (@"".c·3 *@"".Client) InspectExec (@"".id·4 string) (? *@"".ExecInspect, ? error) + func (@"".c·3 *@"".Client) InspectImage (@"".name·4 string "esc:0x0") (? *@"".Image, ? error) + func (@"".c·2 *@"".Client) KillContainer (@"".opts·3 @"".KillContainerOptions) (? error) + func (@"".c·3 *@"".Client) ListContainers (@"".opts·4 @"".ListContainersOptions) (? []@"".APIContainers, ? error) + func (@"".c·3 *@"".Client) ListImages (@"".opts·4 @"".ListImagesOptions) (? []@"".APIImages, ? error) + func (@"".c·3 *@"".Client) ListNetworks () (? []@"".Network, ? 
error) + func (@"".c·2 *@"".Client) LoadImage (@"".opts·3 @"".LoadImageOptions) (? error) + func (@"".c·2 *@"".Client) Logs (@"".opts·3 @"".LogsOptions) (? error) + func (@"".c·3 *@"".Client) NetworkInfo (@"".id·4 string) (? *@"".Network, ? error) + func (@"".c·2 *@"".Client) PauseContainer (@"".id·3 string) (? error) + func (@"".c·2 *@"".Client) Ping () (? error) + func (@"".c·2 *@"".Client) PullImage (@"".opts·3 @"".PullImageOptions, @"".auth·4 @"".AuthConfiguration) (? error) + func (@"".c·2 *@"".Client) PushImage (@"".opts·3 @"".PushImageOptions, @"".auth·4 @"".AuthConfiguration) (? error) + func (@"".c·2 *@"".Client) RemoveContainer (@"".opts·3 @"".RemoveContainerOptions) (? error) + func (@"".c·2 *@"".Client) RemoveEventListener (@"".listener·3 chan *@"".APIEvents "esc:0x0") (? error) + func (@"".c·2 *@"".Client) RemoveImage (@"".name·3 string "esc:0x0") (? error) + func (@"".c·2 *@"".Client) RemoveImageExtended (@"".name·3 string, @"".opts·4 @"".RemoveImageOptions) (? error) + func (@"".c·2 *@"".Client) RenameContainer (@"".opts·3 @"".RenameContainerOptions) (? error) + func (@"".c·2 *@"".Client) ResizeContainerTTY (@"".id·3 string "esc:0x0", @"".height·4 int, @"".width·5 int) (? error) + func (@"".c·2 *@"".Client) ResizeExecTTY (@"".id·3 string, @"".height·4 int, @"".width·5 int) (? error) + func (@"".c·2 *@"".Client) RestartContainer (@"".id·3 string, @"".timeout·4 uint) (? error) + func (@"".c·3 *@"".Client) SearchImages (@"".term·4 string "esc:0x0") (? []@"".APIImageSearch, ? error) + func (@"".c·2 *@"".Client) StartContainer (@"".id·3 string, @"".hostConfig·4 *@"".HostConfig) (? error) + func (@"".c·2 *@"".Client) StartExec (@"".id·3 string, @"".opts·4 @"".StartExecOptions) (? error) + func (@"".c·2 *@"".Client) Stats (@"".opts·3 @"".StatsOptions) (@"".retErr·1 error) + func (@"".c·2 *@"".Client) StopContainer (@"".id·3 string, @"".timeout·4 uint) (? 
error) + func (@"".c·2 *@"".Client) TagImage (@"".name·3 string "esc:0x0", @"".opts·4 @"".TagImageOptions) (? error) + func (@"".c·3 *@"".Client) TopContainer (@"".id·4 string, @"".psArgs·5 string) (? @"".TopResult, ? error) + func (@"".c·2 *@"".Client) UnpauseContainer (@"".id·3 string) (? error) + func (@"".c·3 *@"".Client) Version () (? *@"".Env, ? error) + func (@"".c·3 *@"".Client) WaitContainer (@"".id·4 string) (? int, ? error) + func (@"".c·2 *@"".Client) @"".checkAPIVersion () (? error) + func (@"".c·2 *@"".Client) @"".createImage (@"".qs·3 string "esc:0x0", @"".headers·4 map[string]string, @"".in·5 @"io".Reader, @"".w·6 @"io".Writer, @"".rawJSONStream·7 bool) (? error) + func (@"".c·4 *@"".Client) @"".do (@"".method·5 string, @"".path·6 string, @"".doOptions·7 @"".doOptions) (? []byte, ? int, ? error) + func (@"".c·2 *@"".Client) @"".eventHijack (@"".startTime·3 int64, @"".eventChan·4 chan *@"".APIEvents, @"".errChan·5 chan error) (? error) + func (@"".c·3 *@"".Client) @"".getServerAPIVersionString () (@"".version·1 string, @"".err·2 error) + func (@"".c·2 *@"".Client) @"".getURL (@"".path·3 string) (? string) + func (@"".c·2 *@"".Client) @"".hijack (@"".method·3 string, @"".path·4 string, @"".hijackOptions·5 @"".hijackOptions) (? error) + func (@"".c·2 *@"".Client) @"".stream (@"".method·3 string, @"".path·4 string, @"".streamOptions·5 @"".streamOptions) (? error) + func @"".NewClient (@"".endpoint·3 string) (? *@"".Client, ? error) + func @"".NewTLSClient (@"".endpoint·3 string, @"".cert·4 string, @"".key·5 string, @"".ca·6 string) (? *@"".Client, ? error) + func @"".NewTLSClientFromBytes (@"".endpoint·3 string, @"".certPEMBlock·4 []byte, @"".keyPEMBlock·5 []byte, @"".caPEMCert·6 []byte) (? *@"".Client, ? error) + func @"".NewVersionedClient (@"".endpoint·3 string, @"".apiVersionString·4 string) (? *@"".Client, ? 
error) + func @"".NewVersionnedTLSClient (@"".endpoint·3 string, @"".cert·4 string, @"".key·5 string, @"".ca·6 string, @"".apiVersionString·7 string) (? *@"".Client, ? error) + func @"".NewVersionedTLSClient (@"".endpoint·3 string, @"".cert·4 string, @"".key·5 string, @"".ca·6 string, @"".apiVersionString·7 string) (? *@"".Client, ? error) + func @"".NewClientFromEnv () (? *@"".Client, ? error) + func @"".NewVersionedClientFromEnv (@"".apiVersionString·3 string) (? *@"".Client, ? error) + func @"".NewVersionedTLSClientFromBytes (@"".endpoint·3 string, @"".certPEMBlock·4 []byte, @"".keyPEMBlock·5 []byte, @"".caPEMCert·6 []byte, @"".apiVersionString·7 string) (? *@"".Client, ? error) + type @"".Error struct { Status int; Message string } + func (@"".e·2 *@"".Error) Error () (? string) + var @"".ErrContainerAlreadyExists error + func @"".AlwaysRestart () (? @"".RestartPolicy) { return (@"".RestartPolicy{ Name:"always" }) } + func @"".RestartOnFailure (@"".maxRetry·2 int) (? @"".RestartPolicy) { return (@"".RestartPolicy{ Name:"on-failure", MaximumRetryCount:@"".maxRetry·2 }) } + func @"".NeverRestart () (? @"".RestartPolicy) { return (@"".RestartPolicy{ Name:"no" }) } + type @"".NoSuchContainer struct { ID string; Err error } + func (@"".err·2 *@"".NoSuchContainer) Error () (? string) + type @"".ContainerAlreadyRunning struct { ID string } + func (@"".err·2 *@"".ContainerAlreadyRunning "esc:0x0") Error () (? string) { return "Container already running: " + @"".err·2.ID } + type @"".ContainerNotRunning struct { ID string } + func (@"".err·2 *@"".ContainerNotRunning "esc:0x0") Error () (? string) { return "Container not running: " + @"".err·2.ID } + var @"".ErrNoListeners error + var @"".ErrListenerAlreadyExists error + var @"".EOFEvent *@"".APIEvents + type @"".NoSuchExec struct { ID string } + func (@"".err·2 *@"".NoSuchExec "esc:0x0") Error () (? 
string) { return "No such exec instance: " + @"".err·2.ID } + type @"".ImagePre012 struct { ID string "json:\"id\""; Parent string "json:\"parent,omitempty\""; Comment string "json:\"comment,omitempty\""; Created @"time".Time "json:\"created\""; Container string "json:\"container,omitempty\""; ContainerConfig @"".Config "json:\"container_config,omitempty\""; DockerVersion string "json:\"docker_version,omitempty\""; Author string "json:\"author,omitempty\""; Config *@"".Config "json:\"config,omitempty\""; Architecture string "json:\"architecture,omitempty\""; Size int64 "json:\"size,omitempty\"" } + var @"".ErrNoSuchImage error + var @"".ErrMissingRepo error + var @"".ErrMissingOutputStream error + var @"".ErrMultipleContexts error + var @"".ErrMustSpecifyNames error + func @"".ParseRepositoryTag (@"".repoTag·3 string "esc:0x2") (@"".repository·1 string, @"".tag·2 string) + var @"".ErrNetworkAlreadyExists error + type @"".NoSuchNetwork struct { ID string } + func (@"".err·2 *@"".NoSuchNetwork) Error () (? 
string) + const @"".SIGABRT @"".Signal = 0x6 + const @"".SIGALRM @"".Signal = 0xE + const @"".SIGBUS @"".Signal = 0x7 + const @"".SIGCHLD @"".Signal = 0x11 + const @"".SIGCLD @"".Signal = 0x11 + const @"".SIGCONT @"".Signal = 0x12 + const @"".SIGFPE @"".Signal = 0x8 + const @"".SIGHUP @"".Signal = 0x1 + const @"".SIGILL @"".Signal = 0x4 + const @"".SIGINT @"".Signal = 0x2 + const @"".SIGIO @"".Signal = 0x1D + const @"".SIGIOT @"".Signal = 0x6 + const @"".SIGKILL @"".Signal = 0x9 + const @"".SIGPIPE @"".Signal = 0xD + const @"".SIGPOLL @"".Signal = 0x1D + const @"".SIGPROF @"".Signal = 0x1B + const @"".SIGPWR @"".Signal = 0x1E + const @"".SIGQUIT @"".Signal = 0x3 + const @"".SIGSEGV @"".Signal = 0xB + const @"".SIGSTKFLT @"".Signal = 0x10 + const @"".SIGSTOP @"".Signal = 0x13 + const @"".SIGSYS @"".Signal = 0x1F + const @"".SIGTERM @"".Signal = 0xF + const @"".SIGTRAP @"".Signal = 0x5 + const @"".SIGTSTP @"".Signal = 0x14 + const @"".SIGTTIN @"".Signal = 0x15 + const @"".SIGTTOU @"".Signal = 0x16 + const @"".SIGUNUSED @"".Signal = 0x1F + const @"".SIGURG @"".Signal = 0x17 + const @"".SIGUSR1 @"".Signal = 0xA + const @"".SIGUSR2 @"".Signal = 0xC + const @"".SIGVTALRM @"".Signal = 0x1A + const @"".SIGWINCH @"".Signal = 0x1C + const @"".SIGXCPU @"".Signal = 0x18 + const @"".SIGXFSZ @"".Signal = 0x19 + func @"".init () + var @"time".months [12]string + var @"time".days [7]string + var @"time".Local *@"time".Location + var @"time".UTC *@"time".Location + var @"bufio".ErrInvalidUnreadRune error + var @"net/http".DefaultTransport @"net/http".RoundTripper + var @"crypto/tls".defaultCurvePreferences []@"crypto/tls".CurveID + import rand "crypto/rand" // indirect + var @"crypto/rand".Reader @"io".Reader + type @"sync".rlocker struct { @"sync".w @"sync".Mutex; @"sync".writerSem uint32; @"sync".readerSem uint32; @"sync".readerCount int32; @"sync".readerWait int32 } + func (@"sync".r·1 *@"sync".rlocker) Lock () + func (@"sync".r·1 *@"sync".rlocker) Unlock () + +$$ +_go_.6 0 0 0 
644 1173651 ` +go object darwin amd64 go1.4.2 X:precisestack + +! +go13ldbytes.a"encoding/base64.aencoding/json.aerrors.a +fmt.aio.aos.a path.astrings.abufio.acrypto/tls.acrypto/x509.aio/ioutil.a +net.anet/http.a&net/http/httputil.anet/url.apath/filepath.areflect.aruntime.astrconv.a time.a”github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.a¢github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.a¢github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.a math.a sync.async/atomic.a¢github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.a¦github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.aþJ"".NewAuthConfigurationsFromDockerCfg šeH‹ %HD$˜H;AwèëåHìèHÇ„$øHÇ„$HÇD$0HÇD$8HÇD$`HÇD$hHH,$H‰ïH‰ÞH¥H¥èL‹L$L‹D$Hœ$¸Hƒû„òH-H‰ßH‰îèHÇÅHÇÂH‰¬$ˆH‰”$H‰œ$€H‰$L‰L$pL‰L$L‰D$xL‰D$èH‹œ$€H‰$H‹œ$ˆH‰\$H‹œ$H‰\$èH‹T$H‹D$ H‰T$@H‰D$HH‹H‰D$(1íH9è„H‹\$@H‰$H‹\$HH‰\$èH‹D$H‹T$H‹\$ H‰\$hH‰D$8H‹l$(H‰l$0HƒúH‰T$`„AHH,$H‰ïH‰ÞH¥H¥èL‹L$L‹D$Hœ$˜Hƒû„‰H-H‰ßH‰îèHÇÁHÇÂH‰Œ$ˆH‰”$H‰œ$€H‰$L‰L$pL‰L$L‰D$xL‰D$èH‹œ$€H‰$H‹œ$ˆH‰\$H‹œ$H‰\$èH‹L$H‹D$ H‰L$PH‰D$XH‹H‰D$(1íH9脧H‹\$PH‰$H‹\$XH‰\$èH‹D$H‹L$H‹T$ H‰T$hH‹l$(HƒùH‰L$`t$HÇ„$ðH‰Œ$øH‰”$HÄèÃH‰l$0H‰,$H‰D$8H‰D$èH‹T$H‹L$H‹D$ H‰”$ðH‰Œ$øH‰„$HÄèÃHH‰$HH‰\$HH‰\$èH‹\$H‰\$(é"ÿÿÿ‰épþÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$(é¹ýÿÿ‰éýÿÿ6 +*0runtime.morestack_noctxt go.string."HOME"èos.Getenv®""".statictmp_0068ÄØ runtime.duffcopyÊ4runtime.writebarrierstring path.JoinÖ4go.itab.*os.File.io.Reader¦os.Open˜ go.string."HOME"¾os.Getenv„""".statictmp_0073š runtime.duffcopy 4runtime.writebarrierstringöpath.Join¬ 4go.itab.*os.File.io.Readerü os.Open¼ 0"".NewAuthConfigurations¨ type.*os.File¾ type.io.ReaderÖ 4go.itab.*os.File.io.Readerê  runtime.typ2Itab¤ type.*os.Fileº type.io.ReaderÒ 4go.itab.*os.File.io.Readeræ  runtime.typ2Itab0Ð 
"".autotmp_0076type.*uint8"".autotmp_0075type.*os.File"".autotmp_0072type.[]string"".autotmp_0071ÿtype.*uint8"".autotmp_0067Ïtype.[]string"".autotmp_0064type.string"".autotmp_0063type.string"".autotmp_0062Ÿtype.[2]string"".autotmp_0060ïtype.string"".autotmp_0059_type.[3]string"".p¯type.string"".pÏtype.string "".errtype.error"".rïtype.io.Reader "".~r1type.error "".~r06type.*"".AuthConfigurations&"ЧÏÐFÏÐB\:ÊRÊH $G 77 +2sq+CLq ++C`W>Tgclocals·b8a8407971613b03b21a64dc1e56fba0Tgclocals·2e2a9972ea9ced3a58f9e7510cf4914dè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0"".NewAuthConfigurations šeH‹ %H;awèëêHƒì8HÇD$XHÇD$`H‹\$@H‰$H‹\$HH‰\$èH‹L$H‹D$H‹T$ H‰T$0HƒøH‰D$(tHÇD$PH‰D$XH‰T$`HƒÄ8ÃH‰ $èH‹T$H‹D$H‹L$HƒøtHÇD$PH‰D$XH‰L$`HƒÄ8ÃH‰T$PHÇD$XHÇD$`HƒÄ8à + 0runtime.morestack_noctxt€("".parseDockerConfig€"".authConfigsPp "".errtype.error "".~r20type.error "".~r1 6type.*"".AuthConfigurations"".rtype.io.Readerp`op5opoÐ"~,,  ?‘Tgclocals·9edc1f6d8fc7336ae101b48cbf822a45Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ("".parseDockerConfig€ ê +eH‹ %HD$¸H;AwèëåHìÈHÇ„$èHÇ„$ðHH‰$èH‹D$H‰D$8H‰$H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹L$8H‹yH‹QH‹AH9‚H‹ H‰ÖH)ÆH‰úH)ÂHƒút H‰ÃHËH‰ÙH‰Œ$°H‰´$¸H‰”$ÀH‰Œ$€H‰Œ$˜H‰´$ˆH‰´$ H‰”$H‰”$¨HH‰$èH‹L$H‰L$@H‹œ$˜H‰$H‹œ$ H‰\$H‹œ$¨H‰\$HH‰D$`H‰D$H‰L$hH‰L$ èH‹L$(H‹D$0H‰D$XHƒùH‰L$PuyHH‹ H‹CHH‰$H‹\$@H‹+H‰l$H‰L$pH‰L$H‰D$xH‰D$èH‹D$ ¶\$(H‹(€ût(H‰¬$àHÇ„$èHÇ„$ðHÄÈÃHH‰$èH‹L$H‰L$HH‹œ$˜H‰$H‹œ$ H‰\$H‹œ$¨H‰\$HH‰D$`H‰D$H‰L$hH‰L$ èH‹D$(H‹L$0Hƒøt$HÇ„$àH‰„$èH‰Œ$ðHÄÈÃH‹\$HH‹+H‰¬$àHÇ„$èHÇ„$ðHÄÈÃè " 
+*0runtime.morestack_noctxtz"type.bytes.BufferŒ"runtime.newobjectæ0bytes.(*Buffer).ReadFromôTtype.map[string]map[string]"".dockerConfig†"runtime.newobjectôVtype.*map[string]map[string]"".dockerConfig¦.encoding/json.Unmarshalè"go.string."auths"„Ttype.map[string]map[string]"".dockerConfigØ4runtime.mapaccess2_faststrÚ>type.map[string]"".dockerConfigì"runtime.newobjectÚ@type.*map[string]"".dockerConfigŒ .encoding/json.UnmarshalÞ +$runtime.panicsliceP"".autotmp_0091/type.[]uint8"".autotmp_0090type.error"".autotmp_0089@type.*map[string]"".dockerConfig"".autotmp_0088¯type.string"".&confsÿ@type.*map[string]"".dockerConfig "".&confsWrapperVtype.*map[string]map[string]"".dockerConfig "".~r0type.[]uint8 "".errïtype.error"".byteData_type.[]uint8 "".bufŸ$type.*bytes.Buffer "".~r20type.error "".~r1 >type.map[string]"".dockerConfig"".rtype.io.Reader4"Ç“/À:˜:(€eQ([$0&E-PYšiTgclocals·e94084972e98c8fdf7f2203a35ca807aTgclocals·74b1ee12d224f81a4fda25605fab855dè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ"".authConfigsàÚeH‹ %H„$øþÿÿH;AwèëâHìˆHÇ„$ HÇ„$¨HH‰$HÇD$èH‹\$H‰\$PHH‰$èH‹D$H‰D$HH‰$Hƒ<$„H‹\$PH‰\$èH‹\$HH‰\$@H‹Œ$H¼$81ÀèHH‰$H‰L$Hœ$8H‰\$èH‹œ$81íH9ë„jH‹´$@Hƒþ„šH¬$ØH‰ïèH‹´$8Hƒþ„qH‹.H‰l$XH‹~H‰|$`H´$ØH¼$¸èH‹5H‰4$H´$¸Hl$H‰ïH¥H¥èH‹l$H‹T$ H‹L$(H‹D$0H‹t$8H‰t$pHƒøH‰D$ht$HÇ„$˜H‰„$ H‰´$¨HĈÃH‰¬$ H‰,$H‰”$¨H‰T$H‰Œ$°H‰L$èH\$H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥èL‹L$XL‹D$`H‹T$ H‹L$(H‹D$0H‰„$˜Hƒùt2HÇ„$˜H‹H‰œ$ H‹H‰œ$¨HĈÃL‰L$xL‰„$€H¼$ø1ÀèHœ$ÈH¬$H‰ïH‰ÞH¥H¥Hƒù†áH¬$øH‰ïH‰ÖH¥H¥H‰ÓH‰”$ˆHƒùH‰Œ$†«HƒÃH¬$H‰ïH‰ÞH¥H¥L‰Œ$(L‰„$0HH‰$H‹\$@H‹+H‰l$H\$xH‰\$Hœ$øH‰\$èHœ$8H‰$èH‹œ$81íH9ë…–ýÿÿH‹\$@H‰œ$˜HÇ„$ HÇ„$¨HĈÃè è ‰éˆýÿÿ‰é_ýÿÿ‰%éÜüÿÿ2 +00runtime.morestack_noctxt€Htype.map[string]"".AuthConfiguration¤runtime.makemapÆ4type."".AuthConfigurationsØ"runtime.newobject¨.runtime.writebarrierptrêØ runtime.duffzeroø>type.map[string]"".dockerConfig®&runtime.mapiterinit˜ runtime.duffcopyˆ 
runtime.duffcopy–6encoding/base64.StdEncodingÐPencoding/base64.(*Encoding).DecodeStringÀ2runtime.slicebytetostringôgo.string.":"œstrings.Split """.AuthParseError® """.AuthParseError† +à runtime.duffzero† Htype.map[string]"".AuthConfigurationà $runtime.mapassign1‚ &runtime.mapiternextŒ$runtime.panicindexš$runtime.panicindex@"".autotmp_0111ß(type."".dockerConfig"".autotmp_0110ÿ6type.*"".AuthConfigurations"".autotmp_0109Ÿ2type."".AuthConfiguration"".autotmp_0108Ÿtype.string"".autotmp_0105ŸHtype.map.iter[string]"".dockerConfig"".autotmp_0103ïHtype.map[string]"".AuthConfiguration"".userpassÿtype.[]string "".err¿type.error"".dataÏtype.[]uint8"".confŸ(type."".dockerConfig "".regßtype.string"".c6type.*"".AuthConfigurations "".~r2 type.error "".~r16type.*"".AuthConfigurations"".confs>type.map[string]"".dockerConfig6%“±™+°LÀ=[ +¦B $z2 +¼$ -*Q(C‘ÈE+Tgclocals·f565a1229afec041643831d3cd6a3b7dTgclocals·1c0f8a36a8ada2462e7c582fc8286897è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ,"".(*Client).AuthCheck€ € eH‹ %H„$hÿÿÿH;AwèëâHìH‹„$(HÇ„$0HÇ„$81íH9èuXHH,$H‰ïH‰ÞH¥H¥H\$HÇHÇCHÇCèH‹L$(H‹D$0H‰Œ$0H‰„$8HÄÃHœ$ÈHÇHÇCHÇCH‰ÁHH‰„$ˆH‰„$ÈH‰Œ$H‰Œ$ÐH‹´$ H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥Hœ$ÈHl$(H‰ïH‰ÞH¥H¥H¥èH‹|$@H‹t$HH‹l$PH‹D$XH‹L$`H‹T$hH‰”$€HƒùH‰L$xtH‰Œ$0H‰”$8HÄÃH=Ž‹H‰D$pH‰¼$˜H‰¼$àH‰´$ H‰´$èH‰¬$¨H‰¬$ðH¼$ø1ÀèHœ$øHƒû„.HÇÂHÇÁH‰œ$°H‰”$¸H‰Œ$ÀHH‰$H\$pH‰\$èH‹D$H‹L$H‹œ$°H‰$H‰„$ˆH‰D$H‰Œ$H‰L$èHH‰$Hœ$àH‰\$èH‹D$H‹L$H‹œ$°HƒÃH‰$H‰„$ˆH‰D$H‰Œ$H‰L$èHH,$H‰ïH‰ÞH¥H¥H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$ èH‹L$(H‹D$0H‰Œ$0H‰„$8HÄÉéËþÿÿHÇ„$0HÇ„$8HÄÃ" +00runtime.morestack_noctxtž.go.string."conf is nil"üfmt.Errorf’4type.*"".AuthConfigurationø go.string."POST"ž"go.string."/auth"ø"".(*Client).doªð runtime.duffzero¨type.intÎruntime.convT2E¸ 2runtime.writebarrierifaceÆ type.[]uint8ò runtime.convT2Eä +2runtime.writebarrierifaceò +>go.string."auth error (%d): %s"æ fmt.Errorf@°"".autotmp_0122"type.interface {}"".autotmp_0121"type.interface 
{}"".autotmp_0119Ï&type.[]interface {}"".autotmp_0118Ÿ"type."".doOptions"".autotmp_0117type.error"".autotmp_0116otype.[]uint8"".autotmp_0115Ïtype.int"".autotmp_0114?(type.[2]interface {} "".err¿type.error"".bodyÿtype.[]uint8 "".~r1 type.error"".conf4type.*"".AuthConfiguration"".ctype.*"".Client:%°~¯°ä¯°¯°&¯À,ôEX  ‹ }©ÌNTgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·25ee8e11891a6b427c03a740dc761f96è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ&"".(*Change).String€ìeH‹ %HD$àH;AwèëåHì HÇ„$°HÇ„$¸H‹œ$¨1Ò1ÉH‹kHƒý…lHH‹H‹KH‰T$8H‰T$XH‰L$@H‰L$`H¼$€1ÀèHœ$€Hƒû„"HÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$XH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$HH‰L$H‰D$PH‰D$èHH‰$H‹œ$¨H‰\$Hƒ|$„èH‹L$H‹D$H‹\$hHƒÃH‰$H‰L$HH‰L$H‰D$PH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$ èH‹L$(H‹D$0H‰Œ$°H‰„$¸HĠÉ%éeÿÿÿ‰é×þÿÿHƒýuHH‹H‹Ké‰þÿÿHƒý…þÿÿHH‹H‹Kélþÿÿ +*0runtime.morestack_noctxt®go.string."C"‚ð runtime.duffzeroîtype.string”runtime.convT2Eì2runtime.writebarrierifaceútype.string¾runtime.convT2Ež2runtime.writebarrieriface¬"go.string."%s %s"Žfmt.Sprintf’go.string."A"Ìgo.string."D"0À"".autotmp_0135"type.interface {}"".autotmp_0134¯"type.interface {}"".autotmp_0132o&type.[]interface {}"".autotmp_0129type.string"".autotmp_0128?(type.[2]interface {}"".kindÏtype.string "".~r0type.string"".changetype.*"".Change"ÀŠ¿ÀS€8B: + Þ  +ɽzTgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·0b4080736ceb8b2da6f0b7e8a876e6b8ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.goþ "".NewAPIVersion ’eH‹ %H„$8ÿÿÿH;AwèëâHìHHÇ„$`HÇ„$hHÇ„$pHÇ„$xHÇ„$€H‹œ$PH‰$H‹´$XH‰t$H5Hl$H‰ïH¥H¥èH‹”$PH‹Œ$X¶\$ €û…?H‰”$¨H‰Œ$°Hœ$¸HÇHÇCHœ$¸Hƒû„ÿHÇÂHÇÁH‰œ$àH‰”$èH‰Œ$ðHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$àH‰$H‰Œ$ˆH‰L$H‰„$H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èH‹L$(H‹D$0HÇ„$`HÇ„$hHÇ„$pH‰Œ$xH‰„$€HÄHÉéúþÿÿH‰$H‰L$HHl$H‰ïH‰ÞH¥H¥èH‹T$ H‹L$(H‹D$0H‰”$H‰„$ H‰Œ$HH‰$H‰L$H‰L$èH‹t$H‹T$ H‹D$(H‰´$ÈH‰”$ÐH‰„$ØHÇD$hHÇD$pH‹¬$H‹Œ$H‹œ$ 
H‰œ$1ÀH‰Œ$H‰L$HH‰¬$øH‰éH‹l$HH9èfH‰ËH‰L$PHƒù„›H‹ H‹kH‰D$@H‰D$8H‰L$xH‰¬$€H‰L$XH‰ $H‰l$`H‰l$èH‹´$ÈH‹”$ÐH‹l$8H9Õƒ@HîH‹l$H‰+H‹D$H‹\$ H‰\$pHƒøH‰D$h„·H‹œ$PH‰œ$¨H‹œ$XH‰œ$°H‹\$XH‰œ$˜H‹\$`H‰œ$ H¼$(1ÀèHœ$(Hƒû„UHÇÁHÇÂH‰œ$àH‰Œ$èH‰”$ðHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$àH‰$H‰Œ$ˆH‰L$H‰„$H‰D$èHH‰$Hœ$˜H‰\$èH‹L$H‹D$H‹œ$àHƒÃH‰$H‰Œ$ˆH‰L$H‰„$H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èH‹L$(H‹D$0HÇ„$`HÇ„$hHÇ„$pH‰Œ$xH‰„$€HÄHÉé¤þÿÿH‹L$PH‹D$@HƒÁHÿÀH‹l$HH9茚ýÿÿH‰´$`H‰”$hH‹œ$ØH‰œ$pHÇ„$xHÇ„$€HÄHÃè ‰é^ýÿÿ0 +00runtime.morestack_noctxtúgo.string."."œ strings.Contains¤type.stringÐruntime.convT2Eº2runtime.writebarrierifaceÈLgo.string."Unable to parse version %q"¼fmt.Errorfögo.string."."žstrings.Splitú$type."".APIVersion  "runtime.makeslice² strconv.AtoiÔð runtime.duffzeroÒtype.stringþruntime.convT2Eè2runtime.writebarrierifaceötype.string¢runtime.convT2E”2runtime.writebarrieriface¢xgo.string."Unable to parse version %q: %q is not an integer"–fmt.Errorfø$runtime.panicindexp4"".autotmp_0161"type.interface {}"".autotmp_0160"type.interface {}"".autotmp_0158&type.[]interface {}"".autotmp_0157Ÿtype.string"".autotmp_0156ïtype.*string"".autotmp_0155type.int"".autotmp_0154type.int"".autotmp_0153ÿ"type.interface {}"".autotmp_0151Ï&type.[]interface {}"".autotmp_0150type.error"".autotmp_0149ßtype.string"".autotmp_0148type.string"".autotmp_0147?(type.[2]interface {}"".autotmp_0146type.[]string"".autotmp_0144type.int"".autotmp_0143Ÿtype.[]string"".autotmp_0141¿type.string"".autotmp_0140Ÿ(type.[1]interface {} "".valßtype.string"".iŸtype.int "".err¿type.error "".retÿ$type."".APIVersion "".arrotype.[]string "".~r2Ptype.error "".~r1 $type."".APIVersion"".inputtype.string4%¬e >taO¿CIxQ· @ 6švqAÉæ̱ Tgclocals·56fad8922133a82d7e9abffb05067a58Tgclocals·979c84cf2ee7fa703a7cd5365c579635ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ("".APIVersion.String „eH‹ %HD$èH;AwèëåHì˜HÇ„$¸HÇ„$À1É1ÀH‹´$ 
H‹¬$¨H‹œ$°H‰œ$1ÒH‰¬$ˆH‰l$8H‰´$€H‹l$8H9êåH‰t$HH‹.H‰T$@H‰T$0H‰L$PH‰L$pH‰D$XH‰D$xH‰,$èH‹L$H‹D$H‹\$pH‰$H‹\$xH‰\$H‰L$`H‰L$H‰D$hH‰D$èH‹L$ H‹D$(H‹œ$¨HÿËH‹l$0H9ë~yH‰L$PH‰D$XH‰L$`H‰ $H‰D$hH‰D$HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‹t$HH‹T$@HƒÆHÿÂH‹l$8H9êŒÿÿÿH‰Œ$¸H‰„$ÀHĘÃëÇ +*0runtime.morestack_noctxtÜstrconv.ItoaÈ*runtime.concatstring2Îgo.string."."ö*runtime.concatstring2P°"".autotmp_0175Ÿtype.*int"".autotmp_0174¿type.int"".autotmp_0173¯type.int"".autotmp_0172type.string"".autotmp_0171type.int"".autotmp_0170otype.string"".autotmp_0169Otype.string"".autotmp_0168/$type."".APIVersion"".iÏtype.int "".strtype.string "".~r00type.string"".version$type."".APIVersion"°Ý¯°(”:W]B  ­6­Tgclocals·c45f1008acf31f9ce337f7dfa1fa0204Tgclocals·37f4150aca71c16b472a5e6f54a4a2bcì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ,"".APIVersion.LessThan€ìeH‹ %H;awèëêHƒì8H‹\$@H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹\$0Hƒû| +ÆD$pHƒÄ8ÃÆD$pëô + 0runtime.morestack_noctxt¬*"".APIVersion.comparepp "".~r1`type.bool"".other0$type."".APIVersion"".version$type."".APIVersionpTop€ ¬f +U+Tgclocals·d83eab2a3f0aa562c88b153605ebed26Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ>"".APIVersion.LessThanOrEqualTo€ìeH‹ %H;awèëêHƒì8H‹\$@H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹\$0Hƒû~ +ÆD$pHƒÄ8ÃÆD$pëô + 0runtime.morestack_noctxt¬*"".APIVersion.comparepp "".~r1`type.bool"".other0$type."".APIVersion"".version$type."".APIVersionpTop€ ¶f +U+Tgclocals·d83eab2a3f0aa562c88b153605ebed26Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ2"".APIVersion.GreaterThan€ìeH‹ %H;awèëêHƒì8H‹\$@H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹\$0Hƒû +ÆD$pHƒÄ8ÃÆD$pëô + 0runtime.morestack_noctxt¬*"".APIVersion.comparepp 
"".~r1`type.bool"".other0$type."".APIVersion"".version$type."".APIVersionpTop€ Àf +U+Tgclocals·d83eab2a3f0aa562c88b153605ebed26Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþD"".APIVersion.GreaterThanOrEqualTo€ìeH‹ %H;awèëêHƒì8H‹\$@H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹\$0Hƒû} +ÆD$pHƒÄ8ÃÆD$pëô + 0runtime.morestack_noctxt¬*"".APIVersion.comparepp "".~r1`type.bool"".other0$type."".APIVersion"".version$type."".APIVersionpTop€ Êf +U+Tgclocals·d83eab2a3f0aa562c88b153605ebed26Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ*"".APIVersion.compareàÜeH‹ %H;awèëêL‹L$L‹T$ H‹|$(H‹L$H‹\$1ÀM‰ËL9È}DH‹1H‰ûHÿËH9Ã|eH9øsYIÂH‹H9Ö} +HÇD$8ÿÿÿÿÃH9Ö~ +HÇD$8ÃHƒÁHÿÀL9È|¼I9û~ +HÇD$8ÃI9û} +HÇD$8ÿÿÿÿÃHÇD$8Ãè ëà + 0runtime.morestack_noctxtÌ$runtime.panicindexp"".autotmp_0186type.int"".autotmp_0185type.int"".autotmp_0184type.int"".autotmp_0183type.int"".autotmp_0182type.int "".~r1`type.int"".other0$type."".APIVersion"".version$type."".APIVersion°°DÒ%   + +  + + + ¥ Tgclocals·d83eab2a3f0aa562c88b153605ebed26Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".NewClientàÖeH‹ %H;awèëêHƒì8HÇD$XHÇD$`H‹\$@H‰$H‹\$HH‰\$H\$HÇHÇCèH‹L$ H‹D$(H‹T$0HƒøtHÇD$PH‰D$XH‰T$`HƒÄ8ÃHÇÅ@ˆ)H‰L$PHÇD$XHÇD$`HƒÄ8à + 0runtime.morestack_noctxt¨*"".NewVersionedClientPp "".~r20type.error "".~r1 type.*"".Client"".endpointtype.stringpjop%o° ,; +! 
+S]Tgclocals·5dfce38b1d248a3900c6ec750de77702Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".NewTLSClientÀ¢eH‹ %H;awèëêHƒìhHÇ„$¸HÇ„$ÀH‹\$pH‰$H‹\$xH‰\$H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ H‹œ$˜H‰\$(H‹œ$ H‰\$0H‹œ$¨H‰\$8H\$@HÇHÇCèH‹L$PH‹D$XH‹T$`Hƒøt!HÇ„$°H‰„$¸H‰”$ÀHƒÄhÃHÇÅ@ˆ)H‰Œ$°HÇ„$¸HÇ„$ÀHƒÄhà + 0runtime.morestack_noctxtÐ0"".NewVersionedTLSClient°Ð "".~r5type.error "".~r4€type.*"".Client +"".ca`type.string "".key@type.string"".cert type.string"".endpointtype.string ÐÇÏÐ.Ï ¸2‰! +4 §yTgclocals·8e51ba8a606dfe7bf8ea610f35b1860aTgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ0"".NewTLSClientFromBytes ŽeH‹ %H;awèëêHì€HÇ„$èHÇ„$ðH‹œ$ˆH‰$H‹œ$H‰\$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$ H‹œ$°H‰\$(H‹œ$¸H‰\$0H‹œ$ÀH‰\$8H‹œ$ÈH‰\$@H‹œ$ÐH‰\$HH‹œ$ØH‰\$PH\$XHÇHÇCèH‹L$hH‹D$pH‹T$xHƒøt$HÇ„$àH‰„$èH‰”$ðHÄ€ÃHÇÅ@ˆ)H‰Œ$àHÇ„$èHÇ„$ðHĀà + 0runtime.morestack_noctxt°B"".NewVersionedTLSClientFromBytesà€ "".~r5Àtype.error "".~r4°type.*"".Client"".caPEMCert€type.[]uint8"".keyPEMBlockPtype.[]uint8"".certPEMBlock type.[]uint8"".endpointtype.string €÷ÿ€1ÿ +ÐÐ5¶$ +1 ×yTgclocals·a3afb5a83dcf14cc57a3d3da3be3a7dfTgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ*"".NewVersionedClient  † eH‹ %H;awèëêHƒìxHÇ„$¨HÇ„$°H‹œ$€H‰$H‹œ$ˆH‰\$ÆD$èH‹\$H‰\$8H‹D$ H‹L$(H‰L$XHƒøH‰D$Pt!HÇ„$ H‰„$¨H‰Œ$°HƒÄxÃHÇD$`HÇD$hHÇD$pH‹œ$H‰$H‹´$˜H‰t$H5Hl$H‰ïH¥H¥è¶\$ €û„ +H‹œ$H‰$H‹œ$˜H‰\$èH‹\$H‰\$`H‹\$H‰\$hH‹\$ H‰\$pH‹D$(H‹L$0H‰L$XHƒøH‰D$Pt!HÇ„$ H‰„$¨H‰Œ$°HƒÄxÃHH‰$èH‹\$H‰\$HHH‰$èH‹L$H‰ÏHƒù„P1ÀèH‰L$@H‰ $Hƒ<$„)Hƒ$H‹H‰\$èH‹\$@H‰$Hƒ<$„óHƒ$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹\$@H‰$Hƒ<$„¯Hƒ$(H‹\$8H‰\$èH‹\$@H‰$Hƒ<$tHƒ$0H‹\$HH‰\$èH‹\$@H‰$Hƒ<$tRHƒ$8H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èH‹\$@H‰œ$ HÇ„$¨HÇ„$°HƒÄxÉ%륉%éuÿÿÿ‰%éEÿÿÿ‰%éÿÿÿ‰%éËþÿÿ‰é©þÿÿéhþÿÿ" + 
0runtime.morestack_noctxt¢ "".parseEndpoint¢go.string."."Ä strings.Containsœ "".NewAPIVersionÜ8type."".eventMonitoringStateî"runtime.newobjecttype."".Client¢"runtime.newobjectÔÀ runtime.duffzero”,net/http.DefaultClient¨.runtime.writebarrierptr˜4runtime.writebarrierstringè.runtime.writebarrierptr° .runtime.writebarrierptr  +2runtime.writebarrierslicepð"".autotmp_0193otype.*"".Client"".autotmp_0192_:type.*"".eventMonitoringState,"".requestedAPIVersion/$type."".APIVersion "".errOtype.error"".u"type.*net/url.URL "".~r3Ptype.error "".~r2@type.*"".Client&"".apiVersionString type.string"".endpointtype.string4ðïðÐïðÒïðR>æ2< !?K !*@.P‘,iC`$8Tgclocals·677e212df4ff2dc5d1bd7207f0cb343fTgclocals·63a71a9d82a0cb5094b44aef6b6fe396ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ2"".NewVersionnedTLSClientÀ¼eH‹ %H;awèëêHƒìhHÇ„$ÈHÇ„$ÐH‹\$pH‰$H‹\$xH‰\$H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ H‹œ$˜H‰\$(H‹œ$ H‰\$0H‹œ$¨H‰\$8H‹œ$°H‰\$@H‹œ$¸H‰\$HèH‹T$PH‹L$XH‹D$`H‰”$ÀH‰Œ$ÈH‰„$ÐHƒÄhà + 0runtime.morestack_noctxtÜ0"".NewVersionedTLSClientÐÐ "".~r6°type.error "".~r5 type.*"".Client&"".apiVersionString€type.string +"".ca`type.string "".key@type.string"".cert type.string"".endpointtype.stringÐÃÏà’2® ­3Tgclocals·c984d5bd78e9da313cca302adec9d408Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ0"".NewVersionedTLSClientÀ +² +eH‹ %HD$¨H;AwèëåHìØHÇ„$8HÇ„$@H‹œ$ðH‰$H‹œ$øH‰\$èH‹\$H‰œ$¨H‹\$H‰œ$°H‹\$ H‰œ$¸H‹D$(H‹L$0H‰Œ$ˆHƒøH‰„$€t$HÇ„$0H‰„$8H‰Œ$@HÄØÃH‹œ$H‰$H‹œ$H‰\$èH‹\$H‰œ$H‹\$H‰œ$˜H‹\$ H‰œ$ H‹D$(H‹L$0H‰Œ$ˆHƒøH‰„$€t$HÇ„$0H‰„$8H‰Œ$@HÄØÃH‹œ$H‰$H‹œ$H‰\$èH‹l$H‹T$H‹L$ H‹D$(H‹t$0H‰´$ˆHƒøH‰„$€t$HÇ„$0H‰„$8H‰´$@HÄØÃH‹œ$àH‰$H‹œ$èH‰\$H‹œ$¨H‰\$H‹œ$°H‰\$H‹œ$¸H‰\$ H‹œ$H‰\$(H‹œ$˜H‰\$0H‹œ$ H‰\$8H‰¬$ÀH‰l$@H‰”$ÈH‰T$HH‰Œ$ÐH‰L$PH‹œ$ H‰\$XH‹œ$(H‰\$`èH‹T$hH‹L$pH‹D$xH‰”$0H‰Œ$8H‰„$@HÄØà 
+*0runtime.morestack_noctxt¨$io/ioutil.ReadFileº$io/ioutil.ReadFileÌ$io/ioutil.ReadFileÌ B"".NewVersionedTLSClientFromBytesа"".caPEMCert/type.[]uint8"".keyPEMBlocktype.[]uint8 "".err¯type.error"".certPEMBlock_type.[]uint8 "".~r6°type.error "".~r5 type.*"".Client&"".apiVersionString€type.string +"".ca`type.string "".key@type.string"".cert type.string"".endpointtype.string<"° ¯°ˆ¯°p¯°Û¯ 2ž:W$W$?$ãS‰‰€;Tgclocals·e11b7011fe7d18f281fa367784f98637Tgclocals·f27fde19da2a9a9e0264e00d44cbb36aì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ&"".NewClientFromEnvÀ®eH‹ %H;awèëêHƒì(HÇD$8HÇD$@H$HÇHÇCèH‹L$H‹D$H‹T$ HƒøtHÇD$0H‰D$8H‰T$@HƒÄ(ÃHÇÅ@ˆ)H‰L$0HÇD$8HÇD$@HƒÄ(à + 0runtime.morestack_noctxt€8"".NewVersionedClientFromEnv0P "".~r1type.error "".~r0type.*"".ClientPVOP%O + È,' +% +?aTgclocals·0528ab8f76149a707fd2f0025c2178a3Tgclocals·3280bececceccd33cb74587feedb1f9fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ8"".NewVersionedClientFromEnv€êeH‹ %H„$(ÿÿÿH;AwèëâHìXHÇ„$xHÇ„$€èH‹ $H‹D$H‹T$H‰”$ˆHƒøH‰„$€t$HÇ„$pH‰„$xH‰”$€HÄXÃHƒù„H‹H‹AH‰L$h¶Y€û„šH‰”$H‰$H‰„$˜H‰D$HHl$H‰ïH‰ÞH¥H¥HÇD$ èH‹t$(H‹l$0H‹D$8H‰´$ðH‰„$H‰¬$øHƒý„7H‹œ$H‰œ$ÐH‹œ$˜H‰œ$ØHœ$àHÇHÇCHœ$àHƒû„çHÇÂHÇÁH‰œ$H‰”$H‰Œ$HH‰$Hœ$ÐH‰\$èH‹L$H‹D$H‹œ$H‰$H‰Œ$ÀH‰L$H‰„$ÈH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹L$(H‹D$0HÇ„$pH‰Œ$xH‰„$€HÄXÉéÿÿÿHœ$àHÇHÇCHœ$àHƒû„ÅHÇÂHÇÁH‰œ$H‰”$H‰Œ$HH‰$H‰óHƒý†€HƒÃH‰\$èH‹T$H‹D$H‹œ$H‰$H‰”$ÀH‰T$H‰„$ÈH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹T$(H‹D$0H‰”$H‰„$˜Hœ$8Hƒû„ÉH-H‰ßH‰îèHÇÆHÇÂH‰´$(H‰”$0H‰œ$ H‰$H‹|$hHƒÿ„wHwH|$H¥H¥èH‹œ$ H‰$H‹œ$(H‰\$H‹œ$0H‰\$èH‹T$H‹D$ H‰”$ H‰„$¨Hœ$8Hƒû„H-H‰ßH‰îèHÇÆHÇÂH‰´$(H‰”$0H‰œ$ H‰$H‹|$hHƒÿ„µHwH|$H¥H¥èH‹œ$ H‰$H‹œ$(H‰\$H‹œ$0H‰\$èH‹T$H‹D$ H‰T$pH‰D$xHœ$8Hƒû„KH-H‰ßH‰îèHÇÂHÇÁH‰”$(H‰Œ$0H‰œ$ H‰$H‹|$hHƒÿ„ùHoH|$H‰îH¥H¥èH‹œ$ H‰$H‹œ$(H‰\$H‹œ$0H‰\$èH‹L$H‹D$ H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹\$pH‰\$ 
H‹\$xH‰\$(H‰Œ$°H‰L$0H‰„$¸H‰D$8H‹œ$`H‰\$@H‹œ$hH‰\$HèH‹D$PH‹T$XH‹L$`H‰„$pH‰”$xH‰Œ$€HÄXÉéÿÿÿ‰é®þÿÿ‰éDþÿÿ‰éòýÿÿ‰é‚ýÿÿ‰é0ýÿÿè ‰é4üÿÿH‰”$H‰$H‰„$˜H‰D$H‹œ$`H‰\$H‹œ$hH‰\$èH‹D$ H‹T$(H‹L$0H‰„$pH‰”$xH‰Œ$€HÄXÉéßùÿÿ< +00runtime.morestack_noctxt|"".getDockerEnv’go.string."://"Ìstrings.SplitNštype.stringÆruntime.convT2E°2runtime.writebarrieriface¾hgo.string."could not split %s into two parts by ://"²fmt.ErrorfÈ +type.string† runtime.convT2Eð 2runtime.writebarrierifaceþ ,go.string."https://%s"ò fmt.SprintfØ """.statictmp_0222î  runtime.duffcopy„4runtime.writebarrierstringÚ$path/filepath.JoinÀ""".statictmp_0225Ö runtime.duffcopyì4runtime.writebarrierstringÂ$path/filepath.Joinœ""".statictmp_0228² runtime.duffcopyÎ4runtime.writebarrierstring¤$path/filepath.Join¸0"".NewVersionedTLSClientô$runtime.panicindexö*"".NewVersionedClientP°B"".autotmp_0229type.*[2]string"".autotmp_0227type.[]string"".autotmp_0226type.*[2]string"".autotmp_0224type.[]string"".autotmp_0221type.[]string"".autotmp_0220"type.interface {}"".autotmp_0219*type.*[1]interface {}"".autotmp_0218&type.[]interface {}"".autotmp_0217¯"type.interface {}"".autotmp_0215Ÿ&type.[]interface {}"".autotmp_0214type.error"".autotmp_0213type.*"".Client"".autotmp_0212type.error"".autotmp_0210type.string"".autotmp_0209type.[2]string"".autotmp_0208type.string"".autotmp_0207type.[2]string"".autotmp_0206type.string"".autotmp_0205?type.[2]string"".autotmp_0203(type.[1]interface {}"".autotmp_0201type.string"".autotmp_0200ï(type.[1]interface {}"".autotmp_0198otype.[]string +"".caÏtype.string "".keyÏtype.string"".certïtype.string"".partsÏtype.[]string"".dockerHosttype.string "".err¯type.error"".dockerEnvß$type.*"".dockerEnv "".~r20type.error "".~r1 type.*"".Client&"".apiVersionStringtype.stringB%°d¯°À¯°Ã¯°ž¯°À `æ=$\·…´®§¯ 
gP=¨½vªv‰+‰+†+Š^AFTgclocals·7df0f47b43308a447b1c5362b1e10571Tgclocals·bae70cbfa95aa7f2f402b02d37b0b239ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþB"".NewVersionedTLSClientFromBytes€+è*eH‹ %H„$xþÿÿH;AwèëâHìHÇ„$€HÇ„$ˆH‹œ$H‰$H‹œ$H‰\$ÆD$èH‹\$H‰œ$ˆH‹D$ H‹L$(H‰Œ$0HƒøH‰„$(t$HÇ„$xH‰„$€H‰Œ$ˆHÄÃHÇ„$HHÇ„$PHÇ„$XH‹œ$hH‰$H‹´$pH‰t$H5Hl$H‰ïH¥H¥è¶\$ €û„› H‹œ$hH‰$H‹œ$pH‰\$èH‹\$H‰œ$HH‹\$H‰œ$PH‹\$ H‰œ$XH‹D$(H‹L$0H‰Œ$0HƒøH‰„$(t$HÇ„$xH‰„$€H‰Œ$ˆHÄÃHƒ¼$ „ýHƒ¼$8„îH‹œ$ H‰$H‹œ$(H‰\$H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$ H‹œ$HH‰\$(èHœ$xHl$0H‰ßH‰îèH‹D$xH‹Œ$€H‰Œ$0HƒøH‰„$(t$HÇ„$xH‰„$€H‰Œ$ˆHÄÃHH‰$èH‹|$H‰¼$ÀHƒÿ„1ÀèHH‰$èH‹D$Hƒø„âHÇÇHÇÂH‰¼$hH‰”$pHœ$xH¼$ÀH‰ÞèHH‰$H‰„$`H‰D$Hœ$ÀH‰\$èH‹œ$ÀH‰$Hƒ<$„_Hƒ$H‹œ$`H‰\$H‹œ$hH‰\$H‹œ$pH‰\$èH‹œ$ÀH‰œ$Hƒ¼$P…†HÇÅ@ˆ«€HH‰$èH‹|$H‰ùHƒÿ„O1ÀèH‰Œ$°H‰ $Hƒ<$„%Hƒ$hH‹œ$H‰\$èH‹Œ$(H‹œ$°Hƒùt,HÇ„$xH‰Œ$€H‹œ$0H‰œ$ˆHÄÃH‰œ$°HH‰$èH‹\$H‰œ$ÐHH‰$èH‹|$H‰¼$¨Hƒÿ„y1ÀèHH‰$èH‹|$H‰ùH‰¼$ Hƒÿ„A1ÀèH‹1íH9è„îH‰ $Hƒ<$„ÓH‹Œ$°H‰„$8H‰D$H‰Œ$@H‰L$èH‹œ$¨H‰$Hƒ<$„‰Hƒ$H‹œ$ H‰\$èH‹œ$¨H‰$Hƒ<$„OHƒ$H‹œ$H‰\$èH‹œ$¨H‰$Hƒ<$„Hƒ$H‹œ$H‰\$H‹œ$H‰\$èH‹œ$¨H‰$Hƒ<$„ÎHƒ$(H‹œ$ˆH‰\$èH‹œ$¨H‰$Hƒ<$„”Hƒ$0H‹œ$ÐH‰\$èH‹œ$¨H‰$Hƒ<$taHƒ$8H‹œ$HH‰\$H‹œ$PH‰\$H‹œ$XH‰\$èH‹œ$¨H‰œ$xHÇ„$€HÇ„$ˆHÄÉ%ë–‰%é`ÿÿÿ‰%é&ÿÿÿ‰%éßþÿÿ‰%é¥þÿÿ‰%ékþÿÿ‰%é!þÿÿHH‰$HH‰\$HH‰\$èH‹Œ$ H‹D$éØýÿÿ‰é¸ýÿÿ‰é€ýÿÿ‰%éÏüÿÿ‰éªüÿÿHH‰$HÇD$èH‹\$H‰œ$àHH‰$HÇD$èH‹\$H‰œ$ØHH‰$èH‹D$H‰„$¸H‰$Hƒ<$„H‹œ$àH‰\$èH‹œ$¸H‰$Hƒ<$„ÐHƒ$H‹œ$ØH‰\$èH‹„$¸Hƒø„ HhHÇEHÇEHÇEH‰„$˜H‰$H‹œ$PH‰\$H‹œ$XH‰\$H‹œ$`H‰\$è¶\$ €û…HH‹+H‰¬$H‹kH‰¬$ HÇ„$øHÇ„$HH‰$èH‹D$H‰„$ÈH‰$Hƒ<$„˜H‹œ$H‰\$H‹œ$ H‰\$èH‹œ$ÈH‰œ$ÈH‹1íH9èt,H‹”$ÈHÇ„$xH‰„$€H‰”$ˆHÄÃHH‰$HH‰\$HH‰\$èH‹D$륉%é\ÿÿÿH‹œ$H‰$Hƒ<$tHƒ$@H‹œ$˜H‰\$èé'úÿÿ‰%ëÛ‰éYþÿÿ‰%é$þÿÿ‰%éïýÿÿ‰%é•ùÿÿ‰éùÿÿ‰éêøÿÿHH‹+H‰¬$H‹kH‰¬$HÇ„$èHÇ„$ðHH‰$èH‹D$H‰„$ÈH‰$Hƒ<$„˜H‹œ$H‰\$H‹œ$H‰\$èH‹œ$ÈH‰œ$ÈH‹1íH9èt,H‹Œ$ÈHÇ„$xH‰„$€H‰Œ$ˆHÄÃHH‰$HH‰\$HH‰\$èH‹D$륉%é\ÿÿÿééöÿÿŠ +00runtime.morestack_noctxt¸ "".parseEndpointâgo.string."."„ strings.ContainsÜ "".NewAPIVersion’,crypto/tls.X509KeyPair„ runtime.duffcopyÞ ,type.crypto/tls.Configð "runtime.newobject¬ +ü 
runtime.duffzeroº +go.itab.*bytes.Buffer.io.Reader¢1$type.*bytes.Buffer¸1type.io.ReaderÐ1>go.itab.*bytes.Buffer.io.Readerä1 runtime.typ2ItabàÐ@"".autotmp_0312type.*uint8"".autotmp_0311ÿtype.*"".Error"".autotmp_0310type.*"".Error"".autotmp_0308ï$type.*bytes.Buffer"".autotmp_0307$type.*bytes.Buffer"".autotmp_0306type.*"".Error"".autotmp_0304type.string"".autotmp_0303type.error"".autotmp_0301ÿtype.string"".autotmp_0299$type.*bytes.Buffer"".body/type.[]uint8"".status¿type.intbytes.buf·2¿type.[]uint8"".body_type.[]uint8"".breader$type.*bufio.Reader"".dial¿type.net.Conn"".addressßtype.string"".protocolßtype.string"".resp¯.type.*net/http.Response "".errßtype.error "".reqŸ,type.*net/http.Request "".errÿtype.error "".errŸtype.error "".buftype.[]uint8"".params¿type.io.Reader "".~r5Àtype.error "".~r4°type.int "".~r3€type.[]uint8"".doOptionsP"type."".doOptions"".path0type.string"".methodtype.string"".ctype.*"".ClientÎ%ÐÊÏÐóÏÐŽÏÐîÏÐ]ÏÐ=éÏÐcÏÐÏÐRÏÐÏÐûÏЯÏÐõäÞm Q žNC8 +#.DaN%L^N/R\^O…NJÎ 6!A8!(#NTN®B‚ã +•®Ö[# ‡ W˜«:©ž=™1"«„iáYyTgclocals·162ebaa0b58a36a0548c1b1cb0cd3669Tgclocals·e420e0cb0a7fa3debafcec3aabd8dd0aì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ&"".(*Client).stream X’XeH‹ %H„$°ýÿÿH;AwèëâHìÐH‹„$èHÇ„$HHÇ„$PHƒø…tH‹´$àH‰4$H‰D$H5LD$L‰ÇH¥H¥èH‹„$è¶\$ €û„5Hƒ¼$…ÖHÇ„$ÐHÇ„$ØHÇ„$àHH‰$èH‹D$H‰„$H‰$Hƒ<$„ÊH‹œ$ÐH‰\$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹„$HÇ@HÇ@ ÿÿÿÿH‰„$H‹1íH9è„:H‹Œ$H‰„$€H‰„$H‰Œ$ˆH‰Œ$H‹œ$øHƒû……H‹¬$ðH‰,$H‹´$øH‰t$H5LD$L‰ÇH¥H¥è¶\$ €û„FH‹œ$ØH‰$H‹œ$ðH‰\$H‹œ$øH‰\$èH‹T$H‹L$ H‹œ$àH‰$H‹´$èH‰t$H‰”$ H‰T$H‰Œ$¨H‰L$H´$Hl$ H‰ïH¥H¥èH‹l$0H‹L$8H‹T$@H‰”$8HƒùH‰Œ$0tH‰Œ$HH‰”$PèHÄÐÃH‰l$`H‹}8H‰<$HH|$H‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹„$èHƒøukH‹´$àH‰4$H‰D$H5LD$L‰ÇH¥H¥è¶\$ €ût8H‹t$`H‹~8H‰<$H5H|$H¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹Œ$H¼$€1ÀèHH‰$H‰L$Hœ$€H‰\$èH‹œ$€1íH9넬H‹œ$ˆHƒû„iH‹ H‹CH‹œ$€Hƒû„IH‹3H‹SH‰Œ$ H‰„$¨H‹\$`H‹k8H‰,$H‰´$ðH‰t$H‰”$øH‰T$H‰Œ$°H‰L$H‰„$¸H‰D$ èHœ$€H‰$èH‹œ$€1íH9ë…TÿÿÿH‹œ$ØH‹k(Hƒý„ªH‹UH‰”$àH‹MH‹œ$ØH‹k(Hƒý„|H‹]8H‰œ$ÀH‹]@H‰œ$ÈHƒ¼$ uH‹H‰œ$ 
H‹H‰œ$(Hƒ¼$0uH‹H‰œ$0H‹H‰œ$8H‰Œ$èHƒù… H‰$H‰L$H-LD$L‰ÇH‰îH¥H¥è¶\$ €û„ÚH‹œ$àH‰$H‹œ$èH‰\$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹t$ H‰´$PH‹l$(H‰¬$XH‹L$0H‹T$8H‰”$HƒùH‰Œ$tH‰Œ$HH‰”$PèHÄÐÃH‰,$Hƒþ„5H^ SjèYYH…À…HH‰$H‹œ$PH‰\$H‹œ$XH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹\$H‰\$pHH‰$H‹œ$PH‰\$H‹œ$XH‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$`H‰$èH‹L$H‹T$ H‰”$HƒùH‰Œ$tH‰Œ$HH‰”$PèHÄÐÃH‹œ$@HƒûŽ•èH‹,$‹T$H‹L$H‰¬$HH‰,$‰”$P‰T$H‰Œ$XH‰L$H‹œ$@H‰\$èH‹l$ ‹T$(H‹L$0H‰¬$0H‰l$‰”$8‰T$H‰Œ$@H‰L$H‹œ$XH‰$H‹œ$PH‹[@ÿÓH‹\$pH‰$H‹\$`H‰\$èH‹L$H‹T$H‹\$ H‰œ$HƒúH‰”$„H‹œ$@Hƒû~M1í1Ò1ÉH‰¬$HH‰l$‰”$P‰T$H‰Œ$XH‰L$H‹œ$XH‰$H‹œ$PH‹[@ÿÓH‹”$H‹œ$H‰$H‹Z ÿÓH‹T$H‹L$H‰”$ H‰$H‰Œ$¨H‰L$HHl$H‰ïH‰ÞH¥H¥è¶\$ €ût,H‹H‰œ$HH‹H‰œ$PèHÄÐÃH‹œ$H‰œ$HH‹œ$H‰œ$PèHÄÐÃH‰L$XHƒù„- H‹Q@H‹iHH‰¬$xH‰,$H‰”$pHƒú„ HZ SjèH‹T$hYYH…À…Ö +H‹ZHûÈŒñH‹ZHûග$€û…€H‹z8H‰<$HH|$H‰ÞH¥H¥èH‹T$XH‹t$H‹L$ Hƒù…eH‰´$ H‰4$H‰Œ$¨H‰L$H-LD$L‰ÇH‰îH¥H¥èH‹T$X¶\$ €û„¶œ$€û„‘H=H‰<$Hƒút|Hj@H|$H‰îH¥H¥èH\$Hl$H‰ïH‰ÞH¥H¥Hœ$ H,$H‰ïH‰ÞH¥H¥èH‹T$(H‹L$0H‰”$0H‰”$HH‰Œ$8H‰Œ$PèHÄÐÉë€H=H‰<$Hƒú„`Hj@H|$H‰îH¥H¥èH‹D$H‹L$ H‰„$€H‰„$ÐH‰Œ$ˆH‰Œ$ØHH‰$èH‹L$H‰ÏHƒù„ø1ÀèH‰Œ$€H‰ $Hƒ<$„ÎH‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹œ$€H‰\$hHH‰$èH‹L$H‰Œ$˜H‹\$hH‰$HH‰„$`H‰D$H‰Œ$hH‰L$èH‹Œ$˜H‹t$H‹T$ H‰”$(H‹-H9îuvH‰´$ H‰4$H‰T$H‹-H‰l$H‹-H‰l$èH‹”$(H‹´$ H‹Œ$˜¶\$ €ût&HÇ„$HHÇ„$PèHÄÐÃHƒþH‰´$ tH‰´$HH‰”$PèHÄÐÃH‹Y8Hƒû„åHœ$HÇHÇCHœ$Hƒû„µHÇÅHÇÂH‰œ$H‰¬$ H‰”$(HH‰$H‰L$HƒD$0èH‹D$H‹L$H‹œ$H‰$H‰„$`H‰D$H‰Œ$hH‰L$èHœ$ H,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$ H‰\$H‹œ$(H‰\$ èH‹Œ$˜H‹YHƒû„çýÿÿHœ$HÇHÇCHœ$Hƒû„·HÇÅHÇÂH‰œ$H‰¬$ H‰”$(HH‰$H‰L$èH‹D$H‹L$H‹œ$H‰$H‰„$`H‰D$H‰Œ$hH‰L$èHœ$ H,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$ H‰\$H‹œ$(H‰\$ èéýÿÿ‰éBÿÿÿ‰éDþÿÿH‹YHƒû„YH¼$`1ÀèHœ$`Hƒû„1HÇÅHÇÂH‰œ$H‰¬$ H‰”$(HH‰$H‰L$èH‹D$H‹L$H‹œ$H‰$H‰„$`H‰D$H‰Œ$hH‰L$èHH‰$H‹œ$˜H‰\$HƒD$èH‹D$H‹L$H‹œ$HƒÃH‰$H‰„$`H‰D$H‰Œ$hH‰L$èHœ$ H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥H‹œ$H‰\$ H‹œ$ H‰\$(H‹œ$(H‰\$0èH‹Œ$˜é¤ýÿÿ‰éÈþÿÿH‹Y(Hƒû„ýÿÿH‹i H‰¬$ÀH‹i(H‰¬$ÈHÇ„$ HÇ„$¨HH‰$èH‹\$H‰\$xH‹\$xH‰$Hƒ<$„¼H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹\$xH‰\$xH‹ 1íH9étSH‹T$xH‰Œ$°H‰Œ$ H‰”$¸H‰”$¨H‹œ$ H‰œ$HH‹œ$¨H‰œ$PèHÄÐÃHH‰$HH‰\$HH‰\$èH‹L$é{ÿÿÿ‰%é8ÿÿÿ‰%é&úÿÿ‰éúÿÿ‰é™ùÿÿ¶œ$€û„‘H=H‰<$Hƒút|Hj@H|$H‰îH¥H¥èH\$Hl$H‰ïH‰ÞH¥H¥Hœ$ 
H,$H‰ïH‰ÞH¥H¥èH‹T$(H‹L$0H‰”$0H‰”$HH‰Œ$8H‰Œ$PèHÄÐÉë€H=H‰<$HƒútzHj@H|$H‰îH¥H¥èH\$Hl$ H‰ïH‰ÞýHƒÆHƒÇHÇÁóH¥üHœ$ H,$H‰ïH‰ÞH¥H¥Hœ$0Hl$H‰ïH‰ÞH¥H¥èH‹T$8H‹L$@éCÿÿÿ‰ë‚H=H‰<$Hƒú„¸Hj@H|$H‰îH¥H¥èH\$H,$H‰ïH‰ÞH¥H¥èL‹D$L‰„$èH‹|$H‰¼$ðH‹t$ H‰´$øH‹L$(H‹T$0H‰”$HHƒùH‰Œ$@tH‰Œ$HH‰”$PèHÄÐÃH‹\$XH‹kH‰l$PL‰„$H‰¼$H‰´$HH‰$èH‹L$H‰Œ$ˆH‹l$PH‰)H‹œ$H‰$H‹œ$H‰\$H‹œ$H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰$Hƒ<$t}Hƒ$èH‹œ$ˆH‰œ$ˆH‹ 1íH9ét&H‹œ$ˆH‰œ$PH‰Œ$HèHÄÐÃHH‰$HH‰\$HH‰\$èH‹L$뫉%éwÿÿÿ‰éAþÿÿèHÄÐÉéùôÿÿ‰éÌôÿÿèHÄÐÉéÄñÿÿH‹œ$ØH‹kH‰,$H‹\$`H‰\$èH‹L$H‹T$H‹\$ H‰œ$8HƒúH‰”$0„`ôÿÿH‰$H‹Z ÿÓH‹T$H‹L$H‰”$ H‰$H‰Œ$¨H‰L$HHl$H‰ïH‰ÞH¥H¥è¶\$ €ût,H‹H‰œ$HH‹H‰œ$PèHÄÐÃH‹œ$0H‰œ$HH‹œ$8H‰œ$PèHÄÐÉEé|ïÿÿ‰EéNïÿÿ‰é°îÿÿ‰éîÿÿH‹¬$ض]€û…¥ìÿÿH‹œ$ØHƒ{h…’ìÿÿH‹œ$ØH‰$èH‹L$H‹T$H‰”$HƒùH‰Œ$„]ìÿÿH‰Œ$HH‰”$PèHÄÐÃHH‰$HH‰\$HH‰\$èH‹D$é”ëÿÿ‰%é*ëÿÿHƒø…¦ëÿÿH‹´$àH‰4$H‰D$H5LD$L‰ÇH¥H¥è¶\$ €û…Šêÿÿéjëÿÿ  +00runtime.morestack_noctxtÆ go.string."POST"è runtime.eqstringˆ"type.bytes.Readerš"runtime.newobjectª2runtime.writebarriersliceø>go.itab.*bytes.Reader.io.ReaderÂ(go.string."/version"ä runtime.eqstringÖ&"".(*Client).getURL‚ &net/http.NewRequestø &runtime.deferreturn° +,go.string."User-Agent"Ö +6go.string."go-dockerclient"þ +&net/http.Header.SetÊ  go.string."POST"ì  runtime.eqstring¨ 0go.string."Content-Type"È ,go.string."plain/text"ð &net/http.Header.Setž Ø runtime.duffzero¬ ,type.map[string]stringâ &runtime.mapiterinit˜&net/http.Header.Setº&runtime.mapiternext¬"io/ioutil.DiscardÊ"io/ioutil.Discardþ"io/ioutil.Discardœ"io/ioutil.Discardð go.string."unix"˜ runtime.eqstring¤net.DialÄ&runtime.deferreturnˆ"runtime.deferproc¬type.io.Readeròruntime.convI2I¢bufio.NewReaderÄtype.io.WriterŠruntime.convI2IÎ2net/http.(*Request).Writeº&runtime.deferreturnøtime.Nowþtime.Time.Addœ +È*net/http.ReadResponseÄ +ø +È encoding/json.(*Decoder).DecodeÞ- io.EOF˜. io.EOF°. 
io.EOFÄ.runtime.ifaceeqÄ/&runtime.deferreturnœ0&runtime.deferreturnô1type.stringœ2runtime.convT2E†32runtime.writebarrierifaceŠ4fmt.Fprintâ5type.stringþ5runtime.convT2Eè62runtime.writebarrierifaceì7fmt.FprintlnÌ8ð runtime.duffzeroÊ9type.stringæ9runtime.convT2EÐ:2runtime.writebarrierifaceÞ:type.string–;runtime.convT2Eˆ<2runtime.writebarrierifaceÂ<*go.string."%s %s\x0d"¸=fmt.Fprintfê>.type.errors.errorStringü>"runtime.newobjectö?4runtime.writebarrierstring˜@Bgo.itab.*errors.errorString.error¼A&runtime.deferreturnÚA0type.*errors.errorStringðAtype.errorˆBBgo.itab.*errors.errorString.errorœB runtime.typ2Itab¬Ctype.io.ReaderêCruntime.convI2IÈDio.Copy¨E&runtime.deferreturnÎEtype.io.ReaderŒFruntime.convI2I¸G®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdCopyìGtype.io.Reader²Hruntime.convI2IâH"io/ioutil.ReadAllœJ&runtime.deferreturn†Ktype."".Error˜K"runtime.newobject˜L2runtime.slicebytetostringúL4runtime.writebarrierstring¨M.go.itab.*"".Error.erroròM&runtime.deferreturnNtype.*"".Error¦Ntype.error¾N.go.itab.*"".Error.errorÒN runtime.typ2Itab’O&runtime.deferreturnÊO&runtime.deferreturn¦P*net/http.(*Client).DoQ +àQgo.itab.*bytes.Reader.io.ReaderÒV runtime.typ2ItabÂWgo.string."PUT"äW runtime.eqstring€  ‚"".autotmp_0360"type.interface {}"".autotmp_0359*type.*[1]interface {}"".autotmp_0358&type.[]interface {}"".autotmp_0357type.*uint8"".autotmp_0356type.error"".autotmp_0355¯ 0type.*errors.errorString"".autotmp_0354"type.interface {}"".autotmp_0353"type.interface {}"".autotmp_0351&type.[]interface {}"".autotmp_0350"type.interface {}"".autotmp_0348ï&type.[]interface {}"".autotmp_0347Ÿ 6type.*encoding/json.Decoder"".autotmp_03466type.*encoding/json.Decoder"".autotmp_0345type.io.Reader"".autotmp_0344type.*uint8"".autotmp_0343 type.*"".Error"".autotmp_0342type.*"".Error"".autotmp_0341type.time.Time"".autotmp_0340type.string"".autotmp_0338ÿ$type.*bytes.Reader"".autotmp_0337$type.*bytes.Reader"".autotmp_0336(type.[1]interface 
{}"".autotmp_03350type.*errors.errorString"".autotmp_0334ß(type.[2]interface {}"".autotmp_0333ÿ(type.[1]interface {}"".autotmp_0332type.error"".autotmp_0330type.string"".autotmp_0329type.*"".Error"".autotmp_0328type.bool"".autotmp_0327type.string"".autotmp_0325type.string"".autotmp_0324¿type.time.Time"".autotmp_0323type.time.Time"".autotmp_0322type.error"".autotmp_0320Ÿ6type.map.iter[string]string"".autotmp_0318ßtype.string"".autotmp_0317¿type.error"".autotmp_0316$type.*bytes.Reader +"".&mï(type.*"".jsonMessage "".~r0ßtype.errorerrors.text·2Ÿtype.string$encoding/json.r·2ÿtype.io.Reader"".bodyŸtype.[]uint8"".statusÿ type.intbytes.b·2ÿtype.[]uint8 "".errßtype.error "".decÏ 6type.*encoding/json.Decoder "".errŸtype.error"".bodyÏtype.[]uint8"".breader¿ $type.*bufio.Reader "".errÿtype.error"".dialÿtype.net.Conn"".addressŸtype.string"".protocolßtype.string"".respï .type.*net/http.Response "".val¿type.string "".key¿type.string "".err¿type.error "".reqß ,type.*net/http.Request "".errŸtype.error "".~r3àtype.error "".streamOptionsP*type."".streamOptions"".path0type.string"".methodtype.string"".ctype.*"".Client’%  ⟠  ¥Ÿ   ÞŸ   ÈŸ   -Ÿ   : ÙŸ   «Ÿ   +Ÿ   ÏŸ   õŸ   ¹Ÿ   êŸ   OŸ   Ÿ   ÌŸ   -Ÿ   ˜Ÿ   ‹,ŠðEXÖQ ˜C98ŽF$&.  Dd%L^•AMZ,.Q"‘_.À­0&+é Û Í¦(_.†G{¦;.HR,.M(# >A ìsYH²›FO# W ¨‚xc—^7:O<T?!­$wz +w} éb=b-1.g/^3“t?4@l5165høITgclocals·7e331f181b2554581236d61d5561e53dTgclocals·147deda5d3defe8e1d522f194155c84fì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ&"".(*Client).hijack€4ö3eH‹ %H„$èþÿÿH;AwèëâHì˜HH‰$èL‹D$L‰„$€H¬$ÈL‰ÇH‰îèHÇ„$HÇ„$ H‹œ$ÀHƒû…ù H‹¬$¸H‰,$H‹´$ÀH‰t$H5LD$L‰ÇH¥H¥èL‹„$€¶\$ €û„² HÇ„$¸HÇ„$ÀIƒx@„6Ih@H$H‰ßH‰îH¥H¥èH‹|$H‰¼$€H‹t$H‰´$ˆH‹l$ H‰¬$H‹L$(H‹T$0H‰”$ðHƒùH‰Œ$ètH‰Œ$H‰”$ èHĘÃH‰¼$hH‰´$pH‰¬$xHH‰$èH‹L$H‰ÏHƒù„Ò +1ÀèH‰L$pH‰ $Hƒ<$„« +H‹œ$hH‰\$H‹œ$pH‰\$H‹œ$xH‰\$èL‹„$€H‹\$pH‰\$pH‹ 1íH9é„! 
+H‹\$pH‰œ$ÀH‰Œ$¸Iƒx u.L‰$Hƒ$ H‹H‰\$H‹H‰\$èL‹„$€Iƒx0u&L‰$Hƒ$0H‹H‰\$H‹H‰\$èH‹œ$ H‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹T$H‹L$ H‹œ$¨H‰$H‹œ$°H‰\$H‰”$HH‰T$H‰Œ$PH‰L$H‹œ$¸H‰\$ H‹œ$ÀH‰\$(èH‹L$0H‹\$8H‰œ$øH‹\$@H‰œ$Hƒ¼$øt.H‹œ$øH‰œ$H‹œ$H‰œ$ èHĘÃH‰L$HH‹y8H‰<$HH|$H‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹Œ$ H‹i(Hƒý„rH‹UH‹EH‹i(Hƒý„TH‹]8H‰œ$XH‹]@H‰œ$`Hƒø…èH‰”$¨H‰$H‰„$°H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹Œ$ ¶\$ €û„žHÇ„$HÇ„$ H‹Y1íH9ëtYH‹œ$°Hƒû…·H‹¬$¨H‰,$H‹´$°H‰t$H5LD$L‰ÇH¥H¥èH‹Œ$ ¶\$ €û„pH‹œ$¨H‰$H‹œ$°H‰\$H‹œ$XH‰\$H‹œ$`H‰\$èH‹T$ H‰”$H‹L$(H‰Œ$ H‹\$0H‰œ$øH‹\$8H‰œ$Hƒ¼$øt.H‹œ$øH‰œ$H‹œ$H‰œ$ èHĘÃH‰$H‰L$HÇD$èH‹L$H‰L$PH‰ $H QjèYYH…À…~H‹\$PH‰$H‹\$HH‰\$èH‹Œ$€H‹1íH9ëtPH\$HHH‰$H‹)H‰l$H\$HH‰\$èHH‰$H‹œ$€H‹+H‰l$HÇD$èHH‰$èH‹\$H‰\$xHH‰$èH‹\$H‰œ$ H‹\$PH‰$èH‹T$H‹L$H‹\$H‰\$hH‹\$xH‰$H‰”$(H‰T$H‰Œ$0H‰L$èH‹œ$ H‰$H‹\$hH‰\$èH‹\$xH‹H‹kH‰¬$0H‰,$H‰”$(Hƒú„7HZ SjèYYH…À…HH‰$èH‹\$H‰œ$HH‰$HÇD$èH‹L$H‹œ$H‰$H‰L$èHH‰$èH‹\$H‰œ$˜HH‰$HÇD$èH‹L$H‹œ$˜H‰$H‰L$èHH‰$èH‹\$H‰œ$ˆHH‰$HÇD$èH‹L$H‹œ$ˆH‰$H‰L$èHH‰$èH‹L$H-H‰)H‰L$`H‰ $Hƒ<$„àHƒ$H‹œ$ˆH‰\$èH‹\$`H‰$Hƒ<$„©Hƒ$H‹œ$H‰\$èH‹\$`H‰$Hƒ<$„rHƒ$H‹œ$€H‰\$èH‹\$`H‰$Hƒ<$„;Hƒ$ H‹œ$ H‰\$èH‹\$`SjèYYHH‰$èH‹L$H-H‰)H‰L$XH‰ $Hƒ<$„ÖHƒ$H‹œ$€H‰\$èH‹\$XH‰$Hƒ<$„ŸHƒ$H‹\$xH‰\$èH‹\$XH‰$Hƒ<$„kHƒ$H‹œ$˜H‰\$èH‹\$XSjèYYHH‰$H‹œ$ˆH‹+H‰l$HÇD$èHÇ„$8HÇ„$@HH‰$H‹œ$˜H‹+H‰l$Hœ$8H‰\$èH‹œ$8H‰œ$ØH‹œ$@H‰œ$àHÇ„$8HÇ„$@HH‰$H‹œ$H‹+H‰l$Hœ$8H‰\$èH‹¬$ØH‹”$8H‰”$ÈH‹Œ$@H‰Œ$ÐHƒýt&H‰¬$H‹œ$àH‰œ$ èHĘÃH‰”$H‰Œ$ èHĘÉ%é‰þÿÿ‰%éUþÿÿ‰%éþÿÿ‰%é¹ýÿÿ‰%é‚ýÿÿ‰%éKýÿÿ‰%éýÿÿèHĘÉéÂûÿÿèHĘÃH‹œ$¨H‰$H‹œ$°H‰\$H‹œ$XH‰\$H‹œ$`H‰\$H‹iH‰l$ èH‹T$(H‰”$H‹L$0H‰Œ$ H‹\$8H‰œ$øH‹\$@H‰œ$Hƒ¼$ø„±ùÿÿH‹œ$øH‰œ$H‹œ$H‰œ$ èHĘÃHH‹+H‰¬$¨H‹kH‰¬$°H‹i(HƒýtH‹](H‰œ$XH‹]0H‰œ$`éøÿÿ‰EëÞ‰Eé¤÷ÿÿ‰Eé†÷ÿÿHH‰$HH‰\$HH‰\$èL‹„$€H‹L$é¥õÿÿ‰%éIõÿÿ‰é'õÿÿH‹¬$ ¶]€û…9ôÿÿH‹œ$ Hƒ{h…&ôÿÿH‹œ$ H‰$èL‹„$€H‹L$H‹T$H‰”$HƒùH‰Œ$„éóÿÿH‰Œ$H‰”$ èHĘà+00runtime.morestack_noctxtP*type."".hijackOptionsb"runtime.newobject¢è runtime.duffcopy¶(go.string."/version"Ø runtime.eqstringø*encoding/json.Marshal²&runtime.deferreturn€"type.bytes.Buffer’"runtime.newobjectÄÈ runtime.duffzeroÄ2runtime.writebarriersliceö>go.itab.*bytes.Buffer.io.Readerä"io/ioutil.Discardü"io/ioutil.Discard 
2runtime.writebarrierifaceÎ "io/ioutil.Discardæ "io/ioutil.Discardú 2runtime.writebarrierifaceÐ +&"".(*Client).getURLˆ &net/http.NewRequest¨ &runtime.deferreturnà 0go.string."Content-Type"†,go.string."plain/text"®&net/http.Header.SetŠ go.string."unix"² runtime.eqstringˆ go.string."unix"ª runtime.eqstringÆnet.Dial&runtime.deferreturnÎ>net/http/httputil.NewClientConnøPnet/http/httputil.(*ClientConn).Close·fˆ"runtime.deferprocÎDnet/http/httputil.(*ClientConn).DoŠ&type.chan struct {}À"runtime.chansend1Î&type.chan struct {}’"runtime.chanrecv1 type.net.Conn²"runtime.newobjectÔ$type.*bufio.Readeræ"runtime.newobjectœLnet/http/httputil.(*ClientConn).Hijack”2runtime.writebarrierifaceÊ.runtime.writebarrierptr¶"runtime.deferprocÚtype.chan errorì"runtime.newobject”type.chan error¸ runtime.makechanî.runtime.writebarrierptrütype.chan errorŽ"runtime.newobject¶type.chan errorÚ runtime.makechan.runtime.writebarrierptržtype.chan bool°"runtime.newobjectØtype.chan boolü runtime.makechan².runtime.writebarrierptrÀÂtype.struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }Ò"runtime.newobjectê"".func·001Æ .runtime.writebarrierptrœ!.runtime.writebarrierptrò!.runtime.writebarrierptrÈ".runtime.writebarrierptrâ"runtime.newprocô"štype.struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }†#"runtime.newobjectž#"".func·002ú#.runtime.writebarrierptrÊ$.runtime.writebarrierptr %.runtime.writebarrierptrº%runtime.newprocÌ%type.chan bool&"runtime.chanrecv1Î&type.chan errorš'"runtime.chanrecv1˜(type.chan errorä("runtime.chanrecv1ü)&runtime.deferreturn¸*&runtime.deferreturnü+&runtime.deferreturn¦,&runtime.deferreturn¸-"".tlsDialŠ/&runtime.deferreturn¨/go.string."tcp"Ü0$type.*bytes.Bufferò0type.io.ReaderŠ1>go.itab.*bytes.Buffer.io.Readerž1 runtime.typ2ItabÚ28"".(*Client).checkAPIVersionÞ3&runtime.deferreturn°F"".autotmp_0393ÿœtype.*struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }"".autotmp_0392ïÄtype.*struct { F uintptr; A0 
*chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }"".autotmp_0391ß$type.*bufio.Reader"".autotmp_0390ßtype.net.Conn"".autotmp_0388Ï$type.*bytes.Buffer"".autotmp_0387$type.*bytes.Buffer"".autotmp_0386type.error"".autotmp_0385¿type.error"".autotmp_0383type.chan error"".autotmp_0381Ÿtype.struct {}"".autotmp_0379Ÿtype.string"".autotmp_0378$type.*bytes.Buffer"".&errChanInÿ type.*chan error"".&rwc¿type.*net.Conn "".&brï&type.**bufio.Reader""".&hijackOptions¯,type.*"".hijackOptions"".&errChanOut type.*chan error"".&exitŸtype.*chan boolbytes.buf·2_type.[]uint8"".errOutŸtype.error"".errInÿtype.error"".clientconnDtype.*net/http/httputil.ClientConn"".dialÿtype.net.Conn"".addresstype.string"".protocolßtype.string "".err¿type.error "".reqŸ,type.*net/http.Request "".errßtype.error "".buf/type.[]uint8"".params¿type.io.Reader "".errŸtype.error "".~r3ðtype.error"".path0type.string"".methodtype.string"".ctype.*"".ClientÄ%°¿¯°ú¯°ó¯°-ÎÍ£¦¯°¯°a¯°¯°±¯°©¯€‚ÞmY Pº.&¦ .C&Td l .#  +')œAQQQÚ¬)em& $0 u. 
"M(+#®0{¬1Yf¼O +D‚Êd #Œ<Q&QQe L(8+EeK'ßh‹^ATgclocals·84e82484f467e1dc08e5640e075b9b76Tgclocals·65dfd25068bbba2abebc869f9ef9f7a5ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ&"".(*Client).getURL „eH‹ %HD$H;AwèëåHìðHÇ„$HÇ„$H‹œ$øH‹k(H‰,$èH‹L$H‹D$H‰L$xH‰ $H‰„$€H‰D$HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‰L$8H‰D$@H‹œ$øH‹k(Hƒý„ÆH‹MH‰L$xH‹EH‰„$€Hƒøu@H‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €ûtHÇD$8HÇD$@H‹œ$øHƒ{8„ãH‹\$8H‰\$hH‹\$@H‰\$pH‹œ$H‰\$XH‹œ$H‰\$`H¼$À1ÀèHœ$ÀHƒû„HÇÂHÇÁH‰œ$ˆH‰”$H‰Œ$˜HH‰$H\$hH‰\$èH‹L$H‹D$H‹œ$ˆH‰$H‰L$HH‰L$H‰D$PH‰D$èHH‰$H‹œ$øH‰\$Hƒ|$„îHƒD$8èH‹L$H‹D$H‹œ$ˆHƒÃH‰$H‰L$HH‰L$H‰D$PH‰D$èHH‰$H\$XH‰\$èH‹L$H‹D$H‹œ$ˆHƒÃ H‰$H‰L$HH‰L$H‰D$PH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$HÄðÉ%éÿÿÿ‰élþÿÿH‹\$8H‰\$hH‹\$@H‰\$pH‹œ$H‰\$XH‹œ$H‰\$`H¼$ 1ÀèHœ$ Hƒû„HÇÁHÇÂH‰œ$ˆH‰Œ$H‰”$˜HH‰$H\$hH‰\$èH‹L$H‹D$H‹œ$ˆH‰$H‰L$HH‰L$H‰D$PH‰D$èHH‰$H\$XH‰\$èH‹L$H‹D$H‹œ$ˆHƒÃH‰$H‰L$HH‰L$H‰D$PH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$HÄðÉéÚþÿÿ‰Eé2üÿÿ8 +*0runtime.morestack_noctxt–*net/url.(*URL).Stringägo.string."/"Œ"strings.TrimRight¶ go.string."unix"Þ runtime.eqstring¶è runtime.duffzero´type.stringÚruntime.convT2E¸2runtime.writebarrierifaceÆ$type."".APIVersion–runtime.convT2Eü2runtime.writebarrierifaceŠ type.string° runtime.convT2E– +2runtime.writebarrieriface¤ +(go.string."%s/v%s%s"˜ fmt.Sprintfü ð runtime.duffzeroú type.string runtime.convT2Eþ2runtime.writebarrierifaceŒtype.string²runtime.convT2E˜2runtime.writebarrieriface¦ go.string."%s%s"šfmt.SprintfPà*"".autotmp_0415"type.interface {}"".autotmp_0414"type.interface {}"".autotmp_0412&type.[]interface {}"".autotmp_0411"type.interface {}"".autotmp_0410"type.interface {}"".autotmp_0409Ï"type.interface {}"".autotmp_0407Ï&type.[]interface {}"".autotmp_0406type.string"".autotmp_0405type.string"".autotmp_0404type.string"".autotmp_0403type.string"".autotmp_0402Ÿ(type.[2]interface 
{}"".autotmp_0401type.string"".autotmp_0400¯type.string"".autotmp_0399type.string"".autotmp_0398_(type.[3]interface {}"".autotmp_0396ïtype.string"".urlStrïtype.string "".~r10type.string"".pathtype.string"".ctype.*"".Client("àÏßà€ßà (’ +:d_ãõ (J¤¾ŸÄ½DTgclocals·61dac2719f307a892a4a15123f2e6a2dTgclocals·514c3d378a44440bceb597de19ebfbf7ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".queryStringÀ¢eH‹ %H„$ØþÿÿH;AwèëâHì¨H‹„$°HÇ„$ÀHÇ„$ÈHƒøu HÇ„$ÀHÇ„$ÈHĨÃH‰$H‹œ$¸H‰\$èH‹T$H‹L$H‹D$ H‰”$¨H‰$H‰Œ$°H‰L$H‰„$¸H‰D$èH‹¬$¨H‹”$°H‹Œ$¸H‹\$Hƒû…*H‰,$H‰T$H‰L$èH‹l$H‹T$ H‹L$(H‰¬$¨H‰,$H‰”$°H‰T$H‰Œ$¸H‰L$èH‹\$Hƒût HÇ„$ÀHÇ„$ÈHĨÃHH‰$HÇD$èH‹\$H‰œ$€HÇD$xH‹œ$¨H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹D$H‹\$xH9ÃH‹œ$¨H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹T$H‹D$ H‹\$xH‰\$H‰„$ H‰$H‰”$˜H‹ZXÿÓH\$H¬$@H‰ïH‰ÞèHœ$@H¬$ØH‰ïH‰ÞèH‹œ$ðHƒûtH‹\$xHÿÃH‰\$xéÿÿÿHœ$H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‰Œ$ˆH‰„$Hƒø…ÐHœ$ØH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$H‰Œ$ˆH‰„$H‹œ$¨H‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹\$xH‰\$èH‹T$ H‹L$(H‹D$0H‹œ$€H‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‰”$ÀH‰T$H‰Œ$ÈH‰L$ H‰„$ÐH‰D$(èéÉþÿÿHƒø…[ÿÿÿH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €û…þÿÿé$ÿÿÿH‹œ$€H‰$èH‹L$H‹D$H‰Œ$ÀH‰„$ÈHĨÃéóüÿÿ* +00runtime.morestack_noctxtúreflect.ValueOfî$reflect.Value.Kindâ$reflect.Value.ElemÖ$reflect.Value.Kindº0type.map[string][]stringÞruntime.makemapà,reflect.Value.NumFieldÜ$reflect.Value.Type¼ +è” runtime.duffcopyž ” runtime.duffcopy˜ +go.string."qs"À +*reflect.StructTag.Get¾ strings.ToLowerÜ &reflect.Value.Fieldž,"".addQueryStringValueÜgo.string."-"„ runtime.eqstringÌ*net/url.Values.Encode@Ð "".autotmp_0433type.string"".autotmp_0431$type.reflect.Value"".autotmp_0430type.string"".autotmp_0428Ï0type.reflect.StructField"".autotmp_0427Ÿ"type.reflect.Type"".autotmp_0426type.int"".autotmp_0425"type.reflect.Kind"".autotmp_0424$type.reflect.Value"".autotmp_0422Ï$type.reflect.Value "".key¿type.string"".fieldŸ0type.reflect.StructField"".ißtype.int"".itemsÏ&type.net/url.Values"".valueÿ$type.reflect.Value "".~r1 type.string"".opts"type.interface 
{}4%ÐEÏÐîÏбÏР`¸ +E %R"6 &GŒ +C5–<3!6|:::…ð?Oa3$;Tgclocals·c69849cba6bf70a13b3371331d258b50Tgclocals·928ad969a3698656dfa33d91e2ca9cd1ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ,"".addQueryStringValueÀ@¶@eH‹ %H„$PþÿÿH;AwèëâHì0H‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èH‹¬$PH‹”$XH‹Œ$`H‹D$Hƒø‡ŠHƒø‡]Hƒø…ûH‰,$H‰T$H‰L$è¶\$€û„ØH‹”$8H‹Œ$@H‹„$HHH‹+H‰¬$ÀH‹kH‰¬$ÈH‰Œ$H‰„$˜H‰Œ$H‰„$HH‰$H‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$èH‹\$ Hƒû„8H‹H‹KH‹[H‰”$H‰Œ$ H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ØH‰„$àH‰ÓH‰”$ÐH‰ÍHkíHëH‰$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$xH‰\$Hœ$H‰\$Hœ$H‰\$èHÄ0ÉéÁþÿÿëïHƒø…?H‰,$H‰T$H‰L$èH‹\$HƒûŽH‹œ$8H‰\$hH‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èH‹\$H‰$HÇD$ +èH‹l$H‹T$H‹Œ$@H‹„$HH‰¬$€H‰”$ˆH‰Œ$H‰„$˜H‰Œ$àH‰„$èHH‰$H‹\$hH‰\$H‰Œ$€H‰L$H‰„$ˆH‰D$èH‹\$ Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$ H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰¬$ØH‰„$àH‰ÓH‰”$ÐH‰ÍHkíHëH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$hH‰\$Hœ$H‰\$Hœ$H‰\$èé²ýÿÿ‰éÄþÿÿé¦ýÿÿHƒø„·ýÿÿé—ýÿÿHƒø„¨ýÿÿHƒø„žýÿÿHƒø„”ýÿÿétýÿÿHƒø‡QHƒø …TH‰,$H‰T$H‰L$èòD$ò f.Áwé4ýÿÿH‹œ$8H‰\$PH‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èòD$ò$ÆD$fHÇD$ÿÿÿÿHÇD$@èH‹l$ H‹T$(H‹Œ$@H‹„$HH‰¬$ H‰”$¨H‰Œ$H‰„$˜H‰Œ$H‰„$HH‰$H‹\$PH‰\$H‰Œ$€H‰L$H‰„$ˆH‰D$èH‹\$ Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$ H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰ÓH‰´$ØH‰„$àH‰”$ÐH‰ÍHkíHëH‰$H‹œ$ H‰\$H‹œ$¨H‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$PH‰\$Hœ$H‰\$Hœ$H‰\$èéûÿÿ‰éÄþÿÿHƒø„¢ýÿÿHƒø…øúÿÿH‰,$H‰T$H‰L$èH‹\$H‰\$@H‹\$@HƒûŽÌúÿÿ1ÀH‹l$@H9è¼úÿÿH‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$H‰D$HH‰D$èH‹T$ H‹L$(H‹D$0H‹œ$8H‰$H‹œ$@H‰\$H‹œ$HH‰\$H‰”$èH‰T$H‰Œ$ðH‰L$ H‰„$øH‰D$(èH‹D$HHÿÀéOÿÿÿHƒø‡yHƒø…¸H‰,$H‰T$H‰L$èH‹\$H‹D$ H‹\$(HƒøŽ‡H‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èH‹L$H‹D$ H‰Œ$`H‰ $H‰„$hH‰D$èH‹l$H‹T$H‹L$ H‹D$(H‹\$0H‰œ$XHƒøH‰„$P…SùÿÿH‹œ$8H‰\$pH‰¬$¸H‰,$H‰”$ÀH‰T$H‰Œ$ÈH‰L$èH‹l$H‹T$ H‹Œ$@H‹„$HH‰¬$ÐH‰”$ØH‰Œ$H‰„$˜H‰Œ$0H‰„$8HH‰$H‹\$pH‰\$H‰Œ$pH‰L$H‰„$xH‰D$èH‹\$ Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$ 
H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ØH‰„$àH‰ÓH‰”$ÐH‰ÍHkíHëH‰$H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$pH‰\$Hœ$H‰\$Hœ$H‰\$èéY÷ÿÿ‰éÄþÿÿéM÷ÿÿHƒø…C÷ÿÿH‰,$H‰T$H‰L$è¶\$€û…‡H‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èH‹L$H‹D$ H‰Œ$`H‰ $H‰„$hH‰D$èH‹l$H‹T$H‹L$ H‹D$(H‹\$0H‰œ$HHƒøH‰„$@…œöÿÿH‹œ$8H‰\$XH‰¬$ H‰,$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹l$H‹T$ H‹Œ$@H‹„$HH‰¬$°H‰”$¸H‰Œ$H‰„$˜H‰Œ$ H‰„$(HH‰$H‹\$XH‰\$H‰Œ$pH‰L$H‰„$xH‰D$èH‹\$ Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$ H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰¬$ØH‰„$àH‰ÓH‰”$ÐH‰ÍHkíHëH‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$XH‰\$Hœ$H‰\$Hœ$H‰\$èé¢ôÿÿ‰éÄþÿÿé–ôÿÿHƒø„”ùÿÿHƒø…‚ôÿÿH‰,$H‰T$H‰L$èH‹\$H‹\$ Hƒû„[ôÿÿH‹œ$8H‰\$`H‹œ$PH‰$H‹œ$XH‰\$H‹œ$`H‰\$èH‹l$H‹T$ H‹Œ$@H‹„$HH‰¬$H‰”$˜H‰Œ$H‰„$˜H‰Œ$ðH‰„$øHH‰$H‹\$`H‰\$H‰Œ$pH‰L$H‰„$xH‰D$èH‹\$ Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$ H‰œ$(H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ØH‰„$àH‰ÓH‰”$ÐH‰ÍHkíHëH‰$H‹œ$H‰\$H‹œ$˜H‰\$èH‹œ$ÐH‰œ$H‹œ$ØH‰œ$H‹œ$àH‰œ$HH‰$H‹\$`H‰\$Hœ$H‰\$Hœ$H‰\$èéaòÿÿ‰éÄþÿÿ† +00runtime.morestack_noctxt˜$reflect.Value.Kind´$reflect.Value.BoolŽgo.string."1"Š&type.net/url.Valuesä4runtime.mapaccess1_faststrîtype.[]stringà"runtime.growsliceš4runtime.writebarrierstringˆ &type.net/url.Valuesâ $runtime.mapassign1¾ +"reflect.Value.IntÌ "reflect.Value.Intú "strconv.FormatIntœ &type.net/url.Valuesö 
4runtime.mapaccess1_faststr€type.[]stringò"runtime.growslice¬4runtime.writebarrierstringš&type.net/url.Valuesô$runtime.mapassign1È&reflect.Value.Floatä*$f64.0000000000000000ê&reflect.Value.Float¸&strconv.FormatFloatÚ&type.net/url.Values´4runtime.mapaccess1_faststr¾type.[]string°"runtime.growsliceê4runtime.writebarrierstringØ&type.net/url.Values²$runtime.mapassign1˜"reflect.Value.LenÔ&reflect.Value.Index–!,"".addQueryStringValueþ!*reflect.Value.MapKeys†#.reflect.Value.InterfaceÖ#*encoding/json.Marshal¬%2runtime.slicebytetostringÎ&&type.net/url.Values¨'4runtime.mapaccess1_faststr²(type.[]string¤)"runtime.growsliceÞ*4runtime.writebarrierstringÌ+&type.net/url.Values¦,$runtime.mapassign1‚-&reflect.Value.IsNilô-.reflect.Value.InterfaceÄ.*encoding/json.Marshalš02runtime.slicebytetostring¼1&type.net/url.Values–24runtime.mapaccess1_faststr 3type.[]string’4"runtime.growsliceÌ54runtime.writebarrierstringº6&type.net/url.Values”7$runtime.mapassign1„8(reflect.Value.Stringœ9(reflect.Value.String¾:&type.net/url.Values˜;4runtime.mapaccess1_faststr¢<type.[]string”="runtime.growsliceÎ>4runtime.writebarrierstring¼?&type.net/url.Values–@$runtime.mapassign1`à´"".autotmp_0509type.uint64"".autotmp_0508type.uint64"".autotmp_0507type.int"".autotmp_0506type.int"".autotmp_0505type.[]string"".autotmp_0504type.string"".autotmp_0503type.uint64"".autotmp_0502type.uint64"".autotmp_0501type.int"".autotmp_0500type.int"".autotmp_0499type.[]string"".autotmp_0498type.string"".autotmp_0497type.uint64"".autotmp_0496type.uint64"".autotmp_0495type.int"".autotmp_0494type.int"".autotmp_0493type.[]string"".autotmp_0492type.uint64"".autotmp_0491type.uint64"".autotmp_0490type.int"".autotmp_0489type.int"".autotmp_0488type.[]string"".autotmp_0487type.uint64"".autotmp_0486type.uint64"".autotmp_0485type.int"".autotmp_0484type.int"".autotmp_0483type.[]string"".autotmp_0478¿type.[]string"".autotmp_0476type.int"".autotmp_0475$type.reflect.Value"".autotmp_0474type.int"".autotmp_0473type.[]string"".autotmp_0472type.[]str
ing"".autotmp_0471type.string"".autotmp_0470type.string"".autotmp_0469"type.interface {}"".autotmp_0468type.int"".autotmp_0466type.[]string"".autotmp_0465type.[]string"".autotmp_0464type.string"".autotmp_0463type.string"".autotmp_0462Ÿ"type.interface {}"".autotmp_0461type.bool"".autotmp_0460type.[]string"".autotmp_0459type.[]string"".autotmp_0458type.string"".autotmp_0457type.string"".autotmp_0456ÿtype.string"".autotmp_0455type.string"".autotmp_0454type.[]string"".autotmp_0453type.[]string"".autotmp_0452type.string"".autotmp_0451type.string"".autotmp_0450type.string"".autotmp_0447type.[]string"".autotmp_0446type.[]string"".autotmp_0445type.string"".autotmp_0444type.string"".autotmp_0443type.string"".autotmp_0440_type.[]string"".autotmp_0439/type.[]string"".autotmp_0438ßtype.string"".autotmp_0437¿type.string net/url.value·3¿type.stringnet/url.key·2ÿtype.stringnet/url.v·1ÿ&type.net/url.Values net/url.value·3ÿtype.stringnet/url.key·2Ÿtype.stringnet/url.v·1¯&type.net/url.Values net/url.value·3¿type.stringnet/url.key·2ÿtype.stringnet/url.v·1Ÿ&type.net/url.Values net/url.value·3Ÿtype.stringnet/url.key·2ßtype.stringnet/url.v·1¿&type.net/url.Values net/url.value·3ßtype.stringnet/url.key·2Ÿtype.stringnet/url.v·1&type.net/url.Values net/url.value·3ßtype.stringnet/url.key·2¿type.stringnet/url.v·1ï&type.net/url.Values"".iÏtype.int"".vLenßtype.int "".err¿type.error"".bïtype.[]uint8 "".errßtype.error"".bŸtype.[]uint8"".v0$type.reflect.Value "".keytype.string"".items&type.net/url.Values%à×ß࣠ æð +%\ +!ÉDC + +"Œ   + +  +  +'œ +( +– ? +. 
+,†õ343" +!†õ'(': + +'õ KæÛd.G•ÛdjQ¥Ûd 3Ê~Ûd.Ì~Ûd8L ~ÛdTgclocals·b8c550e5e1ba1f11f1bc237b9d0f0dc8Tgclocals·a157d5303e3a20a2392145ef25e4599bì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".newErroràÈeH‹ %H;awèëêHƒì0HH‰$èH‹D$H‰D$(H‹l$8H‰(H‹\$@H‰$H‹\$HH‰\$H‹\$PH‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$(H‰$Hƒ<$tHƒ$èH‹\$(H‰\$XHƒÄ0É%ëÞ + 0runtime.morestack_noctxt:type."".ErrorL"runtime.newobject´2runtime.slicebytetostring4runtime.writebarrierstringP`"".autotmp_0528type.*"".Error "".~r2@type.*"".Error"".bodytype.[]uint8"".statustype.int`€_`°Ì –%4WTgclocals·3e69739b44630d52358b28c7a0e238faTgclocals·e1ae6533a9e39048ba0735a2264ce16aì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ""".(*Error).ErrorÀ´eH‹ %H;awèëêHì€HÇ„$HÇ„$˜H|$`1ÀèH\$`Hƒû„CHÇÂHÇÁH‰\$HH‰T$PH‰L$XHH‰$H‹œ$ˆH‰\$Hƒ|$„öèH‹L$H‹D$H‹\$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH‰$H‹œ$ˆH‰\$Hƒ|$„•HƒD$èH‹L$H‹D$H‹\$HHƒÃH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$˜HĀÉ%é_ÿÿÿ‰%éþþÿÿ‰é¶þÿÿ + 0runtime.morestack_noctxtzð runtime.duffzeroàtype.int¤runtime.convT2Eü2runtime.writebarrierifaceŠtype.stringÚruntime.convT2Eº2runtime.writebarrierifaceÈTgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".Port.Proto€æeH‹ %H;awèëêHƒì8HÇD$PHÇD$XH‹\$@H‰$H‹t$HH‰t$H5Hl$H‰ïH¥H¥èH‹T$ H‹D$(H‹L$0HƒøuHH‹+H‰l$PH‹kH‰l$XHƒÄ8ÃH‰ÓHƒøvHƒÃH‹+H‰l$PH‹kH‰l$XHƒÄ8Ãè + 0runtime.morestack_noctxt„go.string."/"¦strings.SplitÞgo.string."tcp"Ú$runtime.panicindex@p "".~r0 type.string"".ptype."".Portpnop"opÀ®,:7 +RnTgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ$"".(*State).String    eH‹ 
%HD$ØH;AwèëåHì¨H‹´$°HÇ„$¸HÇ„$À¶€û„­¶^€ût&HH‹+H‰¬$¸H‹kH‰¬$ÀHĨÃèH‹$‹L$H‹D$H‹H‰T$x‰Œ$€H‰„$ˆH‰T$`H‰$‰L$h‰L$H‰D$pH‰D$H‹¼$°Hƒÿ„Ho(H|$H‰îH¥H¥H¥èH‹\$0H‰\$8H\$PHÇHÇCH\$PHƒû„ÒHÇÂHÇÁH‰œ$H‰”$˜H‰Œ$ HH‰$H\$8H‰\$èH‹L$H‹D$H‹œ$H‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$˜H‰\$H‹œ$ H‰\$ èH‹L$(H‹D$0H‰Œ$¸H‰„$ÀHĨÉé'ÿÿÿ‰éÜþÿÿH\$PHÇHÇCH\$PHƒû„ëHÇÁHÇÂH‰œ$H‰Œ$˜H‰”$ HH‰$H‰t$Hƒ|$„HƒD$èH‹L$H‹D$H‹œ$H‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$˜H‰\$H‹œ$ H‰\$ èH‹L$(H‹D$0H‰Œ$¸H‰„$ÀHĨÉ%éWÿÿÿ‰éÿÿÿ +*0runtime.morestack_noctxt´$go.string."paused"ütime.Now¤time.UTCÔtime.Time.Subˆ$type.time.Duration®runtime.convT2EŒ2runtime.writebarrierifaceš"go.string."Up %s"Žfmt.SprintfŽ type.intÎ runtime.convT2E¬ +2runtime.writebarrierifaceº +&go.string."Exit %d"® fmt.Sprintf0Ð"".autotmp_0599"type.interface {}"".autotmp_0598*type.*[1]interface {}"".autotmp_0597&type.[]interface {}"".autotmp_0596Ï"type.interface {}"".autotmp_0594/&type.[]interface {}"".autotmp_0593type.string"".autotmp_0592(type.[1]interface {}"".autotmp_0590ß$type.time.Duration"".autotmp_0588¯(type.[1]interface {} "".~r0type.time.Timetime.t·2_type.time.Time "".~r0type.string"".stype.*"".State4"ÐZÏÐïÏÐÏÐ ÚB  &þ•}Ùp p:Tgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·f56fbe9e0c86cdefdb59a7a88f742314ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþH"".(*NetworkSettings).PortMappingAPI€€eH‹ %H„$ þÿÿH;AwèëâHìà1ÀH¼$0èHÇ„$ðHÇ„$øHÇ„$HÇ„$HÇ„$˜HÇ„$ H‹œ$èH‹kPH¼$1ÀèHH‰$H‰l$Hœ$H‰\$èH‹œ$1íH9ë„4H‹œ$˜Hƒû„ÛH‹;H‹sH‹SH‹œ$Hƒû„·H‹ H‹kH‰¼$ØH‰¼$¨H‰´$àH‰´$°H‰”$èH‰”$¸H‰L$pH‰ $H‰l$xH‰l$èH‹L$H‹D$H‰Œ$€H‰ $H‰„$ˆH‰D$èH‹Œ$°H‹\$H‰\$HHƒù…ªH‹\$pH‰$H‹\$xH‰\$èH‹T$H‹L$H¼$`1ÀèH‹\$HH‰œ$hH‰”$€H‰”$pH‰Œ$ˆH‰Œ$xH‹”$H‹Œ$˜H‹œ$ H‰ØH)ËHƒû}OHH‰$H‰”$ÀH‰T$H‰Œ$ÈH‰L$H‰„$ÐH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ÈH‰„$ÐHH‰$H‰ÓH‰”$ÀH‰ÍHkí0HëH‰\$Hœ$`H‰\$èH‹”$ÀH‹Œ$ÈH‹„$ÐH‰”$H‰Œ$˜H‰„$ Hœ$H‰$èH‹œ$1íH9ë…ÌýÿÿH‹œ$H‰œ$ðH‹œ$˜H‰œ$øH‹œ$ 
H‰œ$HÄàÃH‹„$¨H‹œ$¸H‰œ$è1ÒH‰Œ$àH‰L$XH‰„$ØH‹l$XH9êgÿÿÿH‰D$hHƒø„'H¬$H‰ïH‰ÆèH‰T$`Hœ$H¬$ðH‰ïH‰ÞèH‹\$pH‰$H‹\$xH‰\$èH‹L$H‹D$H‰Œ$€H‰ $H‰„$ˆH‰D$èH‹t$H‰t$@H´$H,$H‰ïH¥H¥èH‹\$H‰\$PH‹\$pH‰$H‹\$xH‰\$èH‹T$H‹L$H¼$01ÀèH‹\$@H‰œ$0H‹t$PH‰´$8H‰”$€H‰”$@H‰Œ$ˆH‰Œ$HH´$ðH¬$PH‰ïH¥H¥H‹”$H‹Œ$˜H‹œ$ H‰ØH)ËHƒû}OHH‰$H‰”$ÀH‰T$H‰Œ$ÈH‰L$H‰„$ÐH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ÈH‰„$ÐHH‰$H‰ÓH‰”$ÀH‰ÍHkí0HëH‰\$Hœ$0H‰\$èH‹”$ÀH‹Œ$ÈH‹„$ÐH‰”$H‰Œ$˜H‰„$ H‹D$hH‹T$`HƒÀ HÿÂé¼ýÿÿ‰éÒýÿÿ‰éBûÿÿ‰éûÿÿ4 +00runtime.morestack_noctxt`Ð runtime.duffzero¦Ø runtime.duffzero´Btype.map["".Port][]"".PortBindingê&runtime.mapiterinitŒ"".Port.PortÜ"".parsePortÄ"".Port.Protoöè runtime.duffzero¦"type.[]"".APIPort˜ "runtime.growsliceð type."".APIPortÐ +.runtime.writebarrierfatÒ &runtime.mapiternext¦ runtime.duffcopyæ runtime.duffcopy–"".Port.Portæ"".parsePortª"".parsePortî"".Port.Proto è runtime.duffzero˜"type.[]"".APIPortŠ"runtime.growsliceâtype."".APIPortÂ.runtime.writebarrierfat@À<"".autotmp_0632type.uint64"".autotmp_0631type.uint64"".autotmp_0630type.int"".autotmp_0629type.int"".autotmp_0628"type.[]"".APIPort"".autotmp_0627ßtype."".APIPort"".autotmp_0626Ÿ&type."".PortBinding"".autotmp_0625ï(type.*"".PortBinding"".autotmp_0624type.int"".autotmp_0623type.int"".autotmp_0619ÿtype.int"".autotmp_0618¿"type.[]"".APIPort"".autotmp_0617ÿtype."".APIPort"".autotmp_0616*type.[]"".PortBinding"".autotmp_0615"type.[]"".APIPort"".autotmp_0614type.string"".autotmp_0613type.string"".autotmp_0612*type.[]"".PortBinding"".autotmp_0610type.string"".autotmp_0608¿type.string"".autotmp_0607ŸLtype.map.iter["".Port][]"".PortBinding"".hŸtype.int"".p¿type.int"".bindingß&type."".PortBinding"".p¯type.int"".bindingsï*type.[]"".PortBinding"".portßtype."".Port"".mappingŸ"type.[]"".APIPort "".~r0"type.[]"".APIPort"".settings0type.*"".NetworkSettings%À’¿ÀˆÀ J²X$¶R +"¬ 
$(8J""Ð4´‘\ª\Aâº\`Tgclocals·8ba904616303767b538615d50f9b7d50Tgclocals·61adfc392b1c041e58d54f9101a6f0b7ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".parsePortÀ¾eH‹ %H;awèëêHƒì8HÇD$XHÇD$`H‹\$@H‰$H‹\$HH‰\$HÇD$ +HÇD$èH‹T$ H‹D$(H‹L$0HƒøtHÇD$PH‰D$XH‰L$`HƒÄ8ÃH‰T$PHÇD$XHÇD$`HƒÄ8à + 0runtime.morestack_noctxt¤"strconv.ParseUintPp "".~r20type.error "".~r1 type.int"".rawPorttype.stringphopo ä,9 +QOTgclocals·5dfce38b1d248a3900c6ec750de77702Tgclocals·3280bececceccd33cb74587feedb1f9fò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ8"".(*Client).RenameContainerà Ü eH‹ %HD$€H;AwèëåHìHÇ„$0HÇ„$8Hœ$H¬$àH‰ïH‰ÞèHH‰$Hœ$àH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹\$H‰œ$H‹\$H‰œ$˜Hœ$ HÇHÇCHœ$ Hƒû„šHÇÂHÇÁH‰œ$ÈH‰”$ÐH‰Œ$ØHH‰$Hœ$H‰\$èH‹L$H‹D$H‹œ$ÈH‰$H‰L$pH‰L$H‰D$xH‰D$èHH,$H‰ïH‰ÞH¥H¥Hœ$Hl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥èH\$0H,$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$H‹œ$ÐH‰\$H‹œ$ØH‰\$ èH‹L$(H‹D$0Hœ$°HÇHÇCHÇCH‹´$H‰4$H5Hl$H‰ïH¥H¥H‰Œ$€H‰L$H‰„$ˆH‰D$ Hœ$°Hl$(H‰ïH‰ÞH¥H¥H¥èH‹L$`H‹D$hH‰Œ$0H‰„$8HÄÉé_þÿÿ +*0runtime.morestack_noctxt¢ runtime.duffcopy°go.string."/containers/create?"Ž*runtime.concatstring2øætype.struct { *"".Config; HostConfig *"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" }¤runtime.convT2Eø go.string."POST"ô"".(*Client).do¦""".ErrNoSuchImageÄ""".ErrNoSuchImageš 8"".ErrContainerAlreadyExists¸ 8"".ErrContainerAlreadyExistsÊ +"type."".ContainerÜ +"runtime.newobjectÊ $type.*"".Containerˆ .encoding/json.Unmarshalä 4runtime.writebarrierstring€°"".autotmp_0673o"type."".doOptions"".autotmp_0670ßætype.struct { *"".Config; HostConfig *"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" }"".autotmp_0669¿type.string"".autotmp_0668?22$c$*: 
páhôV­Tgclocals·3f5a7d1842b14039f35be09ef67df5f8Tgclocals·2ca41a02a2a5788f97a4be1897b36700ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ "".AlwaysRestartàÔHƒìH\$ HÇHÇCHÇCH$HÇHÇCHÇCH$H-H‰ßH‰îH¥H¥H$Hl$ H‰ïH‰ÞH¥H¥H¥HƒÄÄ$go.string."always"00"".autotmp_0676/*type."".RestartPolicy "".~r0*type."".RestartPolicy0e/p ¶ PTgclocals·0528ab8f76149a707fd2f0025c2178a3Tgclocals·0528ab8f76149a707fd2f0025c2178a3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ&"".RestartOnFailure€âHƒìH\$(HÇHÇCHÇCH$HÇHÇCHÇCH$H-H‰ßH‰îH¥H¥H‹t$ H‰t$H4$Hl$(H‰ïH¥H¥H¥HƒÄÄ,go.string."on-failure"@0"".autotmp_0677/*type."".RestartPolicy "".~r1*type."".RestartPolicy"".maxRetrytype.int0l/€  `Tgclocals·2d8f3a7439ca173dec4205ff264b0edcTgclocals·0528ab8f76149a707fd2f0025c2178a3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".NeverRestartàÔHƒìH\$ HÇHÇCHÇCH$HÇHÇCHÇCH$H-H‰ßH‰îH¥H¥H$Hl$ H‰ïH‰ÞH¥H¥H¥HƒÄÄgo.string."no"00"".autotmp_0678/*type."".RestartPolicy "".~r0*type."".RestartPolicy0e/p Î PTgclocals·0528ab8f76149a707fd2f0025c2178a3Tgclocals·0528ab8f76149a707fd2f0025c2178a3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ6"".(*Client).StartContainerà Î eH‹ %HD$¸H;AwèëåHìÈHÇ„$ðHÇ„$øHH,$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$H‹´$àH‰t$H5Hl$ H‰ïH¥H¥èL‹D$0H‹T$8H‹Œ$èHœ$°HÇHÇCHÇCHH‰„$ H‰„$°H‰Œ$¨H‰Œ$¸HÇÈœ$ÀH‹´$ÐH‰4$H5Hl$H‰ïH¥H¥L‰„$€L‰D$H‰”$ˆH‰T$ Hœ$°Hl$(H‰ïH‰ÞH¥H¥H¥èH‹D$XH‹L$`H‰Œ$H‹T$hH‰”$˜H=”…üHH‰$èH‹D$H‰D$xH‰$Hƒ<$„ÇH‹œ$ØH‰\$H‹œ$àH‰\$èH‹\$xH‰$Hƒ<$„ˆHƒ$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$xH‰\$xH‹1íH9ètH‹\$xH‰œ$øH‰„$ðHÄÈÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%élÿÿÿ‰%é-ÿÿÿH=0…¸HH‰$èH‹D$H‰D$pH‰$Hƒ<$„ƒH‹œ$ØH‰\$H‹œ$àH‰\$èH‹\$pH‰\$pH‹1íH9ètH‹\$pH‰œ$øH‰„$ðHÄÈÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éqÿÿÿHƒùtH‰Œ$ðH‰”$øHÄÈÃHÇ„$ðHÇ„$øHÄÈÃ2 
+*0runtime.morestack_noctxtz0go.string."/containers/"Ø$go.string."/start"ú*runtime.concatstring3ê&type.*"".HostConfigì go.string."POST"ô"".(*Client).doØ.type."".NoSuchContainerê"runtime.newobjectÚ4runtime.writebarrierstringÊ2runtime.writebarrierifaceìBgo.itab.*"".NoSuchContainer.errorÂ0type.*"".NoSuchContainerØtype.errorðBgo.itab.*"".NoSuchContainer.error„  runtime.typ2Itabè >type."".ContainerAlreadyRunningú "runtime.newobjectê +4runtime.writebarrierstringŒ Rgo.itab.*"".ContainerAlreadyRunning.errorâ @type.*"".ContainerAlreadyRunningø type.error Rgo.itab.*"".ContainerAlreadyRunning.error¤  runtime.typ2Itab`"".autotmp_0686type.*uint8"".autotmp_0685¯@type.*"".ContainerAlreadyRunning"".autotmp_0683Ÿ0type.*"".NoSuchContainer"".autotmp_0682/"type."".doOptions"".autotmp_0681@type.*"".ContainerAlreadyRunning"".autotmp_06800type.*"".NoSuchContainer "".errotype.error"".pathtype.string "".~r2@type.error"".hostConfig0&type.*"".HostConfig +"".idtype.string"".ctype.*"".Client:"ûÏX +ð2²:QÒ ü ¸)$|ø88˜8¼Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·1497b0fbec88b963d1dc5f4cf9421516ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ4"".(*Client).StopContaineràÔeH‹ %H„$hÿÿÿH;AwèëâHìHÇ„$@HÇ„$HH‹œ$(H‰œ$¸H‹œ$0H‰œ$ÀH‹œ$8H‰\$pH¼$ø1ÀèHœ$øHƒû„˜HÇÂHÇÁH‰œ$àH‰”$èH‰Œ$ðHH‰$Hœ$¸H‰\$èH‹L$H‹D$H‹œ$àH‰$H‰Œ$¨H‰L$H‰„$°H‰D$èHH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$àHƒÃH‰$H‰Œ$¨H‰L$H‰„$°H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èH‹L$(H‹D$0Hœ$ÈHÇHÇCHÇCH‹´$ H‰4$H5Hl$H‰ïH¥H¥H‰Œ$ˆH‰L$H‰„$H‰D$ Hœ$ÈHl$(H‰ïH‰ÞH¥H¥H¥èH‹D$XH‹L$`H‰Œ$˜H‹T$hH‰”$ H=”…ßHH‰$èH‹L$H‰ÏHƒù„¶1ÀèH‰Œ$€H‰ $Hƒ<$„ŒH‹œ$(H‰\$H‹œ$0H‰\$èH‹œ$€H‰œ$€H‹1íH9èt H‹œ$€H‰œ$HH‰„$@HÄÃHH‰$HH‰\$HH‰\$èH‹D$뱉%éhÿÿÿ‰éCÿÿÿH=0…¸HH‰$èH‹D$H‰D$xH‰$Hƒ<$„ƒH‹œ$(H‰\$H‹œ$0H‰\$èH‹\$xH‰\$xH‹1íH9ètH‹\$xH‰œ$HH‰„$@HÄÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éqÿÿÿHƒùtH‰Œ$@H‰”$HHÄÃHÇ„$@HÇ„$HHÄÉéaüÿÿ< +00runtime.morestack_noctxtêð 
runtime.duffzeroètype.string”runtime.convT2Eþ2runtime.writebarrierifaceŒtype.uint²runtime.convT2E¤2runtime.writebarrieriface²Hgo.string."/containers/%s/stop?t=%d"¦fmt.Sprintfž go.string."POST"¦"".(*Client).doŠ .type."".NoSuchContainerœ "runtime.newobjectÎ ð runtime.duffzeroº +4runtime.writebarrierstringè +Bgo.itab.*"".NoSuchContainer.errorÄ 0type.*"".NoSuchContainerÚ type.errorò Bgo.itab.*"".NoSuchContainer.error†  runtime.typ2Itabà 6type."".ContainerNotRunningò "runtime.newobjectâ 4runtime.writebarrierstring„Jgo.itab.*"".ContainerNotRunning.errorÚ8type.*"".ContainerNotRunningðtype.errorˆJgo.itab.*"".ContainerNotRunning.errorœ runtime.typ2Itab`°$"".autotmp_0705type.*uint8"".autotmp_0704¿8type.*"".ContainerNotRunning"".autotmp_0702¯0type.*"".NoSuchContainer"".autotmp_0701Ÿ"type."".doOptions"".autotmp_0700"type.interface {}"".autotmp_0699ß"type.interface {}"".autotmp_0697o&type.[]interface {}"".autotmp_06968type.*"".ContainerNotRunning"".autotmp_06950type.*"".NoSuchContainer"".autotmp_0693Ïtype.uint"".autotmp_0692¿type.string"".autotmp_0691?(type.[2]interface {} "".errÿtype.error"".pathŸtype.string "".~r2@type.error"".timeout0type.uint +"".idtype.string"".ctype.*"".Client@%°¹¯°Ê¯°X¯°¯° °8Ø=ä• ß ¸  *ÉÉ€Šœ8ÀTgclocals·42785a4ae44025160cf24924f7d01efbTgclocals·5c42a9dee0c88889a167a0d13b7c2026ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ:"".(*Client).RestartContainerÀ ¶ eH‹ %H„$pÿÿÿH;AwèëâHìHÇ„$8HÇ„$@H‹œ$ H‰œ$°H‹œ$(H‰œ$¸H‹œ$0H‰\$pH¼$ð1ÀèHœ$ðHƒû„ÉHÇÂHÇÁH‰œ$ØH‰”$àH‰Œ$èHH‰$Hœ$°H‰\$èH‹L$H‹D$H‹œ$ØH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$ØHƒÃH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$H‹œ$àH‰\$H‹œ$èH‰\$ èH‹L$(H‹D$0Hœ$ÀHÇHÇCHÇCH‹´$H‰4$H5Hl$H‰ïH¥H¥H‰Œ$€H‰L$H‰„$ˆH‰D$ Hœ$ÀHl$(H‰ïH‰ÞH¥H¥H¥èH‹L$XH‹D$`H‰„$H‹T$hH‰”$˜Hù”…ÓHH‰$èH‹L$H‰ÏHƒù„ª1ÀèH‰L$xH‰ $Hƒ<$„ƒH‹œ$ H‰\$H‹œ$(H‰\$èH‹\$xH‰\$xH‹1íH9ètH‹\$xH‰œ$@H‰„$8HÄÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éqÿÿÿ‰éOÿÿÿHƒøtH‰„$8H‰”$@HÄÃHÇ„$8HÇ„$@HÄÉé0ýÿÿ, 
+00runtime.morestack_noctxtêð runtime.duffzeroètype.string”runtime.convT2Eþ2runtime.writebarrierifaceŒtype.uint²runtime.convT2E¤2runtime.writebarrieriface²Ngo.string."/containers/%s/restart?t=%d"¦fmt.Sprintfž go.string."POST"¦"".(*Client).doŒ .type."".NoSuchContainerž "runtime.newobjectÐ ð runtime.duffzero¶ +4runtime.writebarrierstringØ +Bgo.itab.*"".NoSuchContainer.error® 0type.*"".NoSuchContainerÄ type.errorÜ Bgo.itab.*"".NoSuchContainer.errorð  runtime.typ2Itab` "".autotmp_0721¯0type.*"".NoSuchContainer"".autotmp_0720Ÿ"type."".doOptions"".autotmp_0719"type.interface {}"".autotmp_0718ß"type.interface {}"".autotmp_0716o&type.[]interface {}"".autotmp_07150type.*"".NoSuchContainer"".autotmp_0713¿type.uint"".autotmp_0712¿type.string"".autotmp_0711?(type.[2]interface {} "".errÿtype.error"".pathŸtype.string "".~r2@type.error"".timeout0type.uint +"".idtype.string"".ctype.*"".Client2% ®Ÿ _Ÿ Ÿ  à.þ=ä• Ó   ÉÉ€ˆÆTgclocals·1da38d5d89527cd2ab312249704d85d7Tgclocals·5bacfca50b7e6b4494d1c0d96e8b2c7cò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ6"".(*Client).PauseContainer€ ú eH‹ %HD$ˆH;AwèëåHìøHÇ„$HÇ„$ H‹œ$H‰œ$¨H‹œ$H‰œ$°Hœ$¸HÇHÇCHœ$¸Hƒû„sHÇÂHÇÁH‰œ$àH‰”$èH‰Œ$ðHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$àH‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èH‹L$(H‹D$0Hœ$ÈHÇHÇCHÇCH‹´$H‰4$H5Hl$H‰ïH¥H¥H‰L$xH‰L$H‰„$€H‰D$ Hœ$ÈHl$(H‰ïH‰ÞH¥H¥H¥èH‹L$XH‹D$`H‰„$ˆH‹T$hH‰”$Hù”…ÓHH‰$èH‹L$H‰ÏHƒù„ª1ÀèH‰L$pH‰ $Hƒ<$„ƒH‹œ$H‰\$H‹œ$H‰\$èH‹\$pH‰\$pH‹1íH9ètH‹\$pH‰œ$ H‰„$HÄøÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éqÿÿÿ‰éOÿÿÿHƒøtH‰„$H‰”$ HÄøÃHÇ„$HÇ„$ HÄøÉé†ýÿÿ$ +*0runtime.morestack_noctxtØtype.string„runtime.convT2Eî2runtime.writebarrierifaceü@go.string."/containers/%s/pause"ðfmt.Sprintfè go.string."POST"ê"".(*Client).doÐ.type."".NoSuchContainerâ"runtime.newobject”ð runtime.duffzeroú4runtime.writebarrierstringœ Bgo.itab.*"".NoSuchContainer.errorò 0type.*"".NoSuchContainerˆ +type.error  +Bgo.itab.*"".NoSuchContainer.error´ + 
runtime.typ2ItabPð"".autotmp_07350type.*"".NoSuchContainer"".autotmp_0734_"type."".doOptions"".autotmp_0733¿"type.interface {}"".autotmp_0731/&type.[]interface {}"".autotmp_07300type.*"".NoSuchContainer"".autotmp_0728Ÿtype.string"".autotmp_0727(type.[1]interface {} "".errßtype.error"".pathÿtype.string "".~r10type.error +"".idtype.string"".ctype.*"".Client2"ðÓïð_ïðïð +€.œ:Œ’ Ó  +Áv}ˆÄTgclocals·fe0d626f6a1a9cb0d3493cb8c292091bTgclocals·556e2b84f9ef2d507be121d828e30b96ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ:"".(*Client).UnpauseContainer€ ú eH‹ %HD$ˆH;AwèëåHìøHÇ„$HÇ„$ H‹œ$H‰œ$¨H‹œ$H‰œ$°Hœ$¸HÇHÇCHœ$¸Hƒû„sHÇÂHÇÁH‰œ$àH‰”$èH‰Œ$ðHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$àH‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èH‹L$(H‹D$0Hœ$ÈHÇHÇCHÇCH‹´$H‰4$H5Hl$H‰ïH¥H¥H‰L$xH‰L$H‰„$€H‰D$ Hœ$ÈHl$(H‰ïH‰ÞH¥H¥H¥èH‹L$XH‹D$`H‰„$ˆH‹T$hH‰”$Hù”…ÓHH‰$èH‹L$H‰ÏHƒù„ª1ÀèH‰L$pH‰ $Hƒ<$„ƒH‹œ$H‰\$H‹œ$H‰\$èH‹\$pH‰\$pH‹1íH9ètH‹\$pH‰œ$ H‰„$HÄøÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éqÿÿÿ‰éOÿÿÿHƒøtH‰„$H‰”$ HÄøÃHÇ„$HÇ„$ HÄøÉé†ýÿÿ$ +*0runtime.morestack_noctxtØtype.string„runtime.convT2Eî2runtime.writebarrierifaceüDgo.string."/containers/%s/unpause"ðfmt.Sprintfè go.string."POST"ê"".(*Client).doÐ.type."".NoSuchContainerâ"runtime.newobject”ð runtime.duffzeroú4runtime.writebarrierstringœ Bgo.itab.*"".NoSuchContainer.errorò 0type.*"".NoSuchContainerˆ +type.error  +Bgo.itab.*"".NoSuchContainer.error´ + runtime.typ2ItabPð"".autotmp_07490type.*"".NoSuchContainer"".autotmp_0748_"type."".doOptions"".autotmp_0747¿"type.interface {}"".autotmp_0745/&type.[]interface {}"".autotmp_07440type.*"".NoSuchContainer"".autotmp_0742Ÿtype.string"".autotmp_0741(type.[1]interface {} "".errßtype.error"".pathÿtype.string "".~r10type.error +"".idtype.string"".ctype.*"".Client2"ðÓïð_ïðïð +€.º:Œ’ Ó  
+Áv}ˆÄTgclocals·fe0d626f6a1a9cb0d3493cb8c292091bTgclocals·556e2b84f9ef2d507be121d828e30b96ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ2"".(*Client).TopContainer€ôeH‹ %H„$(ÿÿÿH;AwèëâHìXH¼$ˆ1ÀèHÇ„$¸HÇ„$ÀHÇ„$àHÇ„$èHH‰$èH‹„$€H‹\$H‰\$xHƒø„H‹œ$xH‰œ$ÀH‰„$ÈHœ$ÐHÇHÇCHœ$ÐHƒû„âHÇÅHÇÂH‰œ$ H‰¬$(H‰”$0HH‰$Hœ$ÀH‰\$èH‹T$H‹D$H‹œ$ H‰$H‰”$ H‰T$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹T$(H‹D$0H‰”$àH‰„$èH‹œ$hH‰œ$ÀH‹œ$pH‰œ$ÈH‹œ$àH‰œ$°H‹œ$èH‰œ$¸H¼$81ÀèHœ$8Hƒû„§HÇÂHÇÅH‰œ$ H‰”$(H‰¬$0HH‰$Hœ$ÀH‰\$èH‹T$H‹D$H‹œ$ H‰$H‰”$ H‰T$H‰„$¨H‰D$èHH‰$Hœ$°H‰\$èH‹T$H‹D$H‹œ$ HƒÃH‰$H‰”$ H‰T$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹T$(H‹D$0Hœ$HÇHÇCHÇCH‹´$`H‰4$H5Hl$H‰ïH¥H¥H‰”$€H‰T$H‰„$ˆH‰D$ Hœ$Hl$(H‰ïH‰ÞH¥H¥H¥èL‹L$xH‹|$@H‰¼$ðH‹t$HH‰´$øH‹l$PH‰¬$H‹T$XH‹D$`H‰„$L‹D$hL‰„$˜Hú”…èHH‰$èH‹T$H‰×Hƒú„¿1ÀèH‰T$pH‰$Hƒ<$„˜H‹œ$hH‰\$H‹œ$pH‰\$èH‹\$pH‰\$pH‹1íH9èt2H‹t$xH¬$ˆH‰ïèH‹\$pH‰œ$ÀH‰„$¸HÄXÃHH‰$HH‰\$HH‰\$èH‹D$량%é\ÿÿÿ‰é:ÿÿÿHƒøt+H¬$ˆH‰ïL‰ÎèH‰„$¸L‰„$ÀHÄXÃH‰<$H‰t$H‰l$HH‰”$ H‰T$L‰Œ$¨L‰L$ èH‹l$xH‹D$(H‹T$0Hƒøt+H‰îH¬$ˆH‰ïèH‰„$¸H‰”$ÀHÄXÃH‰îH¬$ˆH‰ïèHÇ„$¸HÇ„$ÀHÄXÉéRüÿÿ‰éûÿÿH +00runtime.morestack_noctxt`è runtime.duffzeroÎ"type."".TopResultà"runtime.newobjectôtype.string runtime.convT2EŠ2runtime.writebarrieriface˜.go.string."?ps_args=%s"Œfmt.SprintfÞð runtime.duffzeroÜtype.stringˆ runtime.convT2Eò 2runtime.writebarrieriface€ +type.string¬ +runtime.convT2Ež 2runtime.writebarrieriface¬ @go.string."/containers/%s/top%s"  fmt.Sprintf˜ go.string."GET" "".(*Client).doÞ.type."".NoSuchContainerð"runtime.newobject¢ð runtime.duffzeroˆ4runtime.writebarrierstringªBgo.itab.*"".NoSuchContainer.errorâØ runtime.duffcopyª0type.*"".NoSuchContainerÀtype.errorØBgo.itab.*"".NoSuchContainer.errorì runtime.typ2ItabÒØ runtime.duffcopy¬$type.*"".TopResultê.encoding/json.UnmarshalºØ runtime.duffcopyØ runtime.duffcopyа0"".autotmp_0773Ï0type.*"".NoSuchContainer"".autotmp_0772Ÿ"type."".doOptions"".autotmp_0771"type.interface {}"".autotmp_0770"type.interface 
{}"".autotmp_0768&type.[]interface {}"".autotmp_0767ï"type.interface {}"".autotmp_0765o&type.[]interface {}"".autotmp_07620type.*"".NoSuchContainer"".autotmp_0761type.string"".autotmp_0760Ïtype.string"".autotmp_0759type.string"".autotmp_0758?(type.[2]interface {}"".autotmp_0756¯type.string"".autotmp_0755(type.[1]interface {}"".&result¿$type.*"".TopResult "".errtype.error"".bodyÏtype.[]uint8"".path¯type.string"".argsïtype.string "".~r3°type.error "".~r2P"type."".TopResult"".psArgs0type.string +"".idtype.string"".ctype.*"".Client>%°ì¯°r¯°s¯°2¯°À PêL" +”úÁ è+C+3 4o v¾Ì€´ñŒTgclocals·950e6e6b9e7c3fe47672289f0a6f6e8bTgclocals·6a9f496e2cfbe0515ededb4c2d64a743ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ$"".(*Client).StatsÀÀeH‹ %H„$xÿÿÿH;AwèëâHìHH‰$èH‹D$H‰„$˜H‹¬$H‰(HH‰$èH‹|$H‰¼$ˆH¬$H‰îèHÇ„$HHÇ„$PHÇ„$HHÇ„$PHH‰$èH‹\$H‰œ$HH‰$HÇD$èH‹D$H‹œ$H‰$H‰D$èHH‰$èH‹\$H‰\$xHH‰$èH‹\$H‰\$hèH‹$H‹\$H‰\$PH‹\$xH‰$H‰D$èH‹\$hH‰$H‹\$PH‰\$èH¼$à1ÀèHœ$àH-H‰+H‰\$HH‰$Hƒ<$„òHƒ$H‹œ$ˆH‰\$èH‹\$HH‰$Hƒ<$„»Hƒ$H‹œ$H‰\$èH‹L$HH¬$HH‰iH‰ $Hƒ<$„xHƒ$ H‹\$xH‰\$èH‹\$HSjèYYH…À…>HH‰$èH‹L$H-H‰)H‰L$@H‰ $Hƒ<$„ÿHƒ$H‹œ$˜H‰\$èH‹\$@H‰$Hƒ<$„ÈHƒ$H‹œ$ˆH‰\$èH‹\$@H‰$Hƒ<$„‘Hƒ$H‹\$hH‰\$èH‹\$@H‰$Hƒ<$„]Hƒ$ H‹œ$H‰\$èH‹\$@SjèYYHH‰$èH‹\$H‰œ$€HH‰$HÇD$èH‹L$H‹œ$€H‰$H‰L$èH‹œ$€H‹+H‰,$H QjèYYH…À…¯HH‰$èH‹D$H-H‰(H‰D$8H‰$Hƒ<$„pHƒ$H‹œ$ˆH‰\$èH‹\$8H‰$Hƒ<$„9Hƒ$H‹\$xH‰\$èH‹\$8H‰$Hƒ<$„Hƒ$H‹œ$€H‰\$èH‹\$8SjèYYH‹\$xH‹+H‰l$XH‹1íH9è„ŽH‹L$XH‰„$ÐH‰„$ H‰Œ$ØH‰Œ$¨HH‰$èH‹|$H‰ùHƒÿ„@1ÀèH‰L$0H‰ $Hƒ<$„H‹œ$ H‰\$H‹œ$¨H‰\$èH‹\$0H‰\$(HH‰$èH‹\$H‰\$pHH‰$èH‹D$H‹\$pH‰$H‰D$èH‹L$pH‹\$(H‰$HH‰„$ÀH‰D$H‰Œ$ÈH‰L$èH‹T$H‹L$ H‰Œ$¸H‹-H9ê„åHƒúH‰”$°tH‰”$HH‰Œ$PèHÄÃH‹\$pH‹+H‰l$`HH‰$H‹œ$ˆH‹kH‰l$H\$`H‰\$èHH‰$èH‹D$H‹\$pH‰$H‰D$èH‹\$pH‹+H‹\$(H‰$HH‰„$ÀH‰D$H‰¬$ÈH‰l$èH‹T$H‹L$ H‰Œ$¸H‹-H9ê…ÿÿÿH‰”$°H‰$H‰L$H‹-H‰l$H‹-H‰l$èH‹Œ$¸H‹”$°¶\$ €ûuéÎþÿÿHÇ„$HHÇ„$PèHÄÉ%éÛýÿÿ‰é¹ýÿÿHH‰$HH‰\$HH‰\$èH‹D$é@ýÿÿ‰%éïüÿÿ‰%é»üÿÿ‰%é„üÿÿèHÄÉ%é—ûÿÿ‰%écûÿÿ‰%é,ûÿÿ‰%éõúÿÿèHÄÉ%é|úÿÿ‰%é9úÿÿ‰%éúÿÿž 
+00runtime.morestack_noctxtPtype.*"".Clientb"runtime.newobject (type."".StatsOptions²"runtime.newobjectìØ runtime.duffcopyÚtype.chan errorì"runtime.newobject”type.chan error¸ runtime.makechanî.runtime.writebarrierptrü&type.*io.PipeReaderŽ"runtime.newobject°&type.*io.PipeWriterÂ"runtime.newobjectàio.Pipe¢.runtime.writebarrierptrÒ.runtime.writebarrierptrðì runtime.duffzeroŽ"".func·003ê.runtime.writebarrierptrÀ.runtime.writebarrierptr¨.runtime.writebarrierptrÂ"runtime.deferprocæÄtype.struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }ø"runtime.newobject "".func·004ì .runtime.writebarrierptr +.runtime.writebarrierptr’ .runtime.writebarrierptrè .runtime.writebarrierptr‚ runtime.newproc” &type.chan struct {}¦ "runtime.newobjectÎ &type.chan struct {}ò  runtime.makechan¨ .runtime.writebarrierptrÔ (runtime.closechan·fä "runtime.deferprocˆ¬type.struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }š"runtime.newobject²"".func·005Ž.runtime.writebarrierptrÞ.runtime.writebarrierptr´.runtime.writebarrierptrÎruntime.newprocú@go.itab.*io.PipeReader.io.Readerè4type.encoding/json.Decoderú"runtime.newobject¬Ä runtime.duffzero’2runtime.writebarrieriface´type.*"".StatsÆ"runtime.newobjectètype."".Statsú"runtime.newobjectª.runtime.writebarrierptrÔtype.**"".Stats’>encoding/json.(*Decoder).DecodeÄ io.EOFž&runtime.deferreturnÖ*type.chan<- *"".Statsž"runtime.chansend1¬type."".Stats¾"runtime.newobjectî.runtime.writebarrierptržtype.*"".StatsÜ>encoding/json.(*Decoder).DecodeŽ io.EOFÐ io.EOFè io.EOFüruntime.ifaceeqö&runtime.deferreturnº&type.*io.PipeReaderÐtype.io.Readerè@go.itab.*io.PipeReader.io.Readerü runtime.typ2Itabä&runtime.deferreturnà&runtime.deferreturn2"".autotmp_0803¯6type.*encoding/json.Decoder"".autotmp_08026type.*encoding/json.Decoder"".autotmp_0800otype.io.Reader"".autotmp_0799Ÿ®type.*struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"".autotmp_0798Ætype.*struct { F uintptr; A0 
**"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }"".autotmp_0797Oºtype.struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"".autotmp_0796ÿ¼type.*struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"".autotmp_0795ï&type.*io.PipeWriter"".autotmp_0794ß&type.*io.PipeReader"".autotmp_0793type.error"".autotmp_0792type.*"".Stats"".autotmp_0791type.*"".Stats"".autotmp_0790Ïtype.*"".Stats"".autotmp_0786&type.*io.PipeReader"".&stats¯type.**"".Stats"".&quit(type.*chan struct {}"".&readCloserŸ(type.**io.PipeReader"".&errCï type.*chan error"".&writeCloser¿(type.**io.PipeWriter"".&optsÿ*type.*"".StatsOptions +"".&cß type.**"".Client$encoding/json.r·2Ïtype.io.Reader "".err¯type.error"".decoder¿6type.*encoding/json.Decoder"".retErrptype.error†%ù×h¬í«v=$ lò +ªQr"Ã,×Q)¬ªBV8( ¡ &E$0+2€0(]&E!LlUS+ 8ŽS V +L‘AŸ<D3pTgclocals·378f3e900c220a5cad1989c9c06023bdTgclocals·1bc79a478470e6209b0ca20d87c54bd3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ4"".(*Client).KillContainerà +Ú +eH‹ %HD$¨H;AwèëåHìØHÇ„$HÇ„$Hœ$èH¬$ÀH‰ïH‰ÞH¥H¥H¥HH‰$Hœ$ÀH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥Hœ$èHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥H‰Œ$˜H‰L$0H‰„$ H‰D$8èH‹L$@H‹D$HHœ$¨HÇHÇCHÇCH‹´$àH‰4$H5Hl$H‰ïH¥H¥H‰L$xH‰L$H‰„$€H‰D$ Hœ$¨Hl$(H‰ïH‰ÞH¥H¥H¥èH‹L$XH‹D$`H‰„$ˆH‹T$hH‰”$Hù”…ÐHH‰$èH‹L$H‰ÏHƒù„§1ÀèH‰L$pH‰ $Hƒ<$„€Hœ$èHl$H‰ïH‰ÞH¥H¥èH‹\$pH‰\$pH‹1íH9ètH‹\$pH‰œ$H‰„$HÄØÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%étÿÿÿ‰éRÿÿÿHƒøtH‰„$H‰”$HÄØÃHÇ„$HÇ„$HÄØÃ& +*0runtime.morestack_noctxt²8type."".KillContainerOptionsÞruntime.convT2EŽ"".queryString°0go.string."/containers/"ˆ$go.string."/kill?"ä*runtime.concatstring4Ü go.string."POST"Þ"".(*Client).doÄ.type."".NoSuchContainerÖ"runtime.newobjectˆð runtime.duffzeroè4runtime.writebarrierstringŠBgo.itab.*"".NoSuchContainer.errorà0type.*"".NoSuchContainerötype.errorŽ Bgo.itab.*"".NoSuchContainer.error¢  
runtime.typ2Itab`°"".autotmp_0813Ï0type.*"".NoSuchContainer"".autotmp_0812_"type."".doOptions"".autotmp_08110type.*"".NoSuchContainer"".autotmp_0810type.string"".autotmp_0809/8type."".KillContainerOptions "".errŸtype.error"".path¿type.string "".~r1@type.error"".opts8type."".KillContainerOptions"".ctype.*"".Client,"°Š¯°_¯°¯°*˜ :Æ’ Ð#n€…½Tgclocals·bd92ef728a38faac78badef3588d832fTgclocals·1705812f15ec71868ae696027438a358ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ8"".(*Client).RemoveContainerà +Ú +eH‹ %HD$¨H;AwèëåHìØHÇ„$HÇ„$Hœ$èH¬$ÀH‰ïH‰ÞH¥H¥H¥HH‰$Hœ$ÀH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥Hœ$èHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥H‰Œ$˜H‰L$0H‰„$ H‰D$8èH‹L$@H‹D$HHœ$¨HÇHÇCHÇCH‹´$àH‰4$H5Hl$H‰ïH¥H¥H‰L$xH‰L$H‰„$€H‰D$ Hœ$¨Hl$(H‰ïH‰ÞH¥H¥H¥èH‹L$XH‹D$`H‰„$ˆH‹T$hH‰”$Hù”…ÐHH‰$èH‹L$H‰ÏHƒù„§1ÀèH‰L$pH‰ $Hƒ<$„€Hœ$èHl$H‰ïH‰ÞH¥H¥èH‹\$pH‰\$pH‹1íH9ètH‹\$pH‰œ$H‰„$HÄØÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%étÿÿÿ‰éRÿÿÿHƒøtH‰„$H‰”$HÄØÃHÇ„$HÇ„$HÄØÃ& +*0runtime.morestack_noctxt²go.string."/containers/%s/copy"†fmt.SprintfðØ runtime.duffcopy¼ @type."".CopyFromContainerOptionsè runtime.convT2E¼ + go.string."POST"¾ "".(*Client).doò .type."".NoSuchContainer„ "runtime.newobject¶ ð runtime.duffzero–4runtime.writebarrierstring¸Bgo.itab.*"".NoSuchContainer.errorŽ0type.*"".NoSuchContainer¤type.error¼Bgo.itab.*"".NoSuchContainer.errorÐ runtime.typ2Itabþ"type.bytes.Buffer"runtime.newobjectÂÈ runtime.duffzeroÂ2runtime.writebarriersliceä>go.itab.*bytes.Buffer.io.Readeræio.Copy¸$type.*bytes.BufferÎtype.io.Readeræ>go.itab.*bytes.Buffer.io.Readerú runtime.typ2ItabÀ*"".autotmp_0839type.*uint8"".autotmp_0838ß$type.*bytes.Buffer"".autotmp_0837$type.*bytes.Buffer"".autotmp_0836type.*uint8"".autotmp_08350type.*"".NoSuchContainer"".autotmp_0834¿"type."".doOptions"".autotmp_0833ß"type.interface {}"".autotmp_0831&type.[]interface 
{}"".autotmp_0829Ï0type.*"".NoSuchContainer"".autotmp_0828$type.*bytes.Buffer"".autotmp_08270type.*"".NoSuchContainer"".autotmp_0826_@type."".CopyFromContainerOptions"".autotmp_0824¿(type.[1]interface {}"".autotmp_08230type.*"".NoSuchContainerbytes.buf·2Ÿtype.[]uint8 "".errŸtype.error"".bodyïtype.[]uint8 "".url¿type.string "".~r1ptype.error"".opts@type."".CopyFromContainerOptions"".ctype.*"".ClientB%À·¿Àæ¿À_¿Àô¿ÀWð +BŠ =Ђˆ ÐÝE:ZIévqkc I½ +YR J4Tgclocals·387212f77114c618c992aca1d7f6e2d3Tgclocals·4e703ba17638508264f032b5f70033cdò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ4"".(*Client).WaitContainerÀ ® eH‹ %HD$°H;AwèëåHìÐHÇ„$øHÇ„$Hœ$¸HÇHÇCHÇCHH,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹´$èH‰t$H5Hl$ H‰ïH¥H¥èH\$0Hl$H‰ïH‰ÞH¥H¥H‹´$ØH‰4$H5Hl$H‰ïH¥H¥Hœ$¸Hl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$ H‹\$HH‰œ$¨H‹\$PH‰œ$°H‹L$XH‹D$`H‰„$€H‹T$hH‰”$ˆHù”…ßHH‰$èH‹L$H‰ÏHƒù„¶1ÀèH‰L$pH‰ $Hƒ<$„H‹œ$àH‰\$H‹œ$èH‰\$èH‹\$pH‰\$pH‹1íH9èt)HÇ„$ðH‹\$pH‰œ$H‰„$øHÄÐÃHH‰$HH‰\$HH‰\$èH‹D$먉%éeÿÿÿ‰éCÿÿÿHƒøt$HÇ„$ðH‰„$øH‰”$HÄÐÃHH‰$èH‹L$H‰L$xH‹œ$ H‰$H‹œ$¨H‰\$H‹œ$°H‰\$HH‰„$H‰D$H‰Œ$˜H‰L$ èH‹D$(H‹L$0Hƒøt$HÇ„$ðH‰„$øH‰Œ$HÄÐÃH‹\$xH‹+H‰¬$ðHÇ„$øHÇ„$HÄÐÃ( +*0runtime.morestack_noctxt¸0go.string."/containers/"–"go.string."/wait"¸*runtime.concatstring3† go.string."POST"Ú"".(*Client).doŽ.type."".NoSuchContainer "runtime.newobjectÒð runtime.duffzero¸4runtime.writebarrierstringÚBgo.itab.*"".NoSuchContainer.errorÈ0type.*"".NoSuchContainerÞtype.erroröBgo.itab.*"".NoSuchContainer.errorŠ runtime.typ2Itab  type.*struct { StatusCode int }Þ +.encoding/json.Unmarshal` "".autotmp_0851¿0type.*"".NoSuchContainer"".autotmp_0850/"type."".doOptions"".autotmp_08470type.*"".NoSuchContainer +"".&r¯>type.*struct { StatusCode int } "".errŸtype.error"".body_type.[]uint8 "".~r2@type.error "".~r10type.int +"".idtype.string"".ctype.*"".Client:" ¾Ÿ kŸ ™Ÿ /Ÿ + 4² :ý ß$[$9 
›Q¯½VrTgclocals·eda57d60e805297221010beefc01cf3dTgclocals·ec5d02e01ec699817d1c71b60a3fa4d0ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ8"".(*Client).CommitContainerÀ´eH‹ %H„$8ÿÿÿH;AwèëâHìHHÇ„$¸HÇ„$ÀHœ$XH¬$ðH‰ïH‰ÞèHH‰$Hœ$ðH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥H‰Œ$°H‰L$H‰„$¸H‰D$èL‹D$ H‹T$(Hœ$ØHÇHÇCHÇCH‹Œ$¨HH‰„$ H‰„$ØH‰Œ$¨H‰Œ$àH‹´$PH‰4$H5Hl$H‰ïH¥H¥L‰„$€L‰D$H‰”$ˆH‰T$ Hœ$ØHl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$ÀH‹\$HH‰œ$ÈH‹\$PH‰œ$ÐH‹L$XH‹D$`H‰„$H‹T$hH‰”$˜Hù”…ÜHH‰$èH‹L$H‰ÏHƒù„³1ÀèH‰L$pH‰ $Hƒ<$„ŒHœ$XHl$H‰ïH‰ÞH¥H¥èH‹\$pH‰\$pH‹1íH9èt)HÇ„$°H‹\$pH‰œ$ÀH‰„$¸HÄHÃHH‰$HH‰\$HH‰\$èH‹D$먉%éhÿÿÿ‰éFÿÿÿHƒøt$HÇ„$°H‰„$¸H‰”$ÀHÄHÃHH‰$èH‹L$H‰L$xH‹œ$ÀH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$HH‰„$ H‰D$H‰Œ$¨H‰L$ èH‹D$(H‹L$0Hƒøt$HÇ„$°H‰„$¸H‰Œ$ÀHÄHÃH‹\$xH‰œ$°HÇ„$¸HÇ„$ÀHÄHÃ0 +00runtime.morestack_noctxt¨Ì runtime.duffcopy¶"".(*Client).ResizeContainerTTY –eH‹ %H„$hÿÿÿH;AwèëâHìHÇ„$HHÇ„$PHH‰$HÇD$èH‹D$H‰„$€H‰D$xH‹œ$8H‰$èH‹L$H‹D$HH‹3H‹kH‰Œ$˜H‰„$ H‰´$¸H‰´$ÈH‰¬$ÀH‰¬$ÐHH‰$èH‹\$Hƒû„¤HÇÂHÇÁH‰œ$H‰”$H‰Œ$H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$èHH‰$H‹\$xH‰\$Hœ$ÈH‰\$Hœ$H‰\$èH‹œ$€H‰\$pH‹œ$@H‰$èH‹L$H‹D$HH‹3H‹kH‰Œ$ˆH‰„$H‰´$¨H‰´$ÈH‰¬$°H‰¬$ÐHH‰$èH‹\$Hƒû„“HÇÂHÇÁH‰œ$H‰”$H‰Œ$H‹œ$H‰$H‹œ$ˆH‰\$H‹œ$H‰\$èHH‰$H‹\$pH‰\$Hœ$ÈH‰\$Hœ$H‰\$èH‹œ$€H‰$èH‹L$H‹D$Hœ$èHÇHÇCHÇCHH,$H‰ïH‰ÞH¥H¥H‹œ$(H‰\$H‹´$0H‰t$H5Hl$ H‰ïH¥H¥H‰Œ$ØH‰L$0H‰„$àH‰D$8èH\$@Hl$H‰ïH‰ÞH¥H¥H‹´$ H‰4$H5Hl$H‰ïH¥H¥Hœ$èHl$(H‰ïH‰ÞH¥H¥H¥èH‹L$`H‹D$hH‰Œ$HH‰„$PHÄÉéfþÿÿ‰éUýÿÿ0 +00runtime.morestack_noctxt€&type.net/url.Values¤runtime.makemapêstrconv.ItoaŒgo.string."h"ˆtype.[1]stringš"runtime.newobjectÚ4runtime.writebarrierstringè&type.net/url.ValuesÂ$runtime.mapassign1þstrconv.Itoa go.string."w"œtype.[1]string®"runtime.newobjectî4runtime.writebarrierstringü&type.net/url.ValuesÖ $runtime.mapassign1ø *net/url.Values.EncodeØ +0go.string."/containers/"¶ (go.string."/resize?"Œ *runtime.concatstring4Ú  go.string."POST"® 
"".(*Client).dop°*"".autotmp_0896_"type."".doOptions"".autotmp_0895type.*[1]string"".autotmp_0893type.string"".autotmp_0892type.[]string"".autotmp_0891type.string"".autotmp_0890type.string"".autotmp_0889/type.[]string"".autotmp_0888Ÿtype.string"".autotmp_0887type.string net/url.value·3Ÿtype.stringnet/url.key·2ßtype.stringnet/url.v·1Ï&type.net/url.Values net/url.value·3ÿtype.stringnet/url.key·2¿type.stringnet/url.v·1¿&type.net/url.Values"".params¯&type.net/url.Values "".~r3Ptype.error"".width@type.int"".height0type.int +"".idtype.string"".ctype.*"".Client%°×¯°*È=ŠŠö 8Q#X`4X`4ŠQ:Tgclocals·78ce512784b85b97418b7726f81bf730Tgclocals·564b0cd8045e1e3a560aecfc019285daò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ8"".(*Client).ExportContainer  – eH‹ %H„$hÿÿÿH;AwèëâHìHÇ„$HHÇ„$PH‹œ$0Hƒû…ÜHH‰$èH‹L$H‰ÏHƒù„³1ÀèH‰Œ$€H‰ $Hƒ<$„‰Hœ$(Hl$H‰ïH‰ÞH¥H¥èH‹œ$€H‰œ$€H‹1íH9èt H‹œ$€H‰œ$PH‰„$HHÄÃHH‰$HH‰\$HH‰\$èH‹D$뱉%ékÿÿÿ‰éFÿÿÿHœ$¨HÇHÇCHœ$¨Hƒû„pHÇÅHÇÂH‰œ$¸H‰¬$ÀH‰”$ÈHH‰$Hœ$(H‰\$èH‹T$H‹D$H‹œ$¸H‰$H‰”$˜H‰T$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$¸H‰\$H‹œ$ÀH‰\$H‹œ$ÈH‰\$ èL‹D$(H‹T$0H¼$Ð1ÀèHÇÆ@ˆ´$ÐH´$8H¬$ðH‰ïH¥H¥H‹´$ H‰4$H5Hl$H‰ïH¥H¥L‰„$ˆL‰D$H‰”$H‰T$ Hœ$ÐHl$(H‰ïH‰ÞèèH‹L$pH‹D$xH‰Œ$HH‰„$PHÄÉé‰þÿÿ( +00runtime.morestack_noctxt¤.type."".NoSuchContainer¶"runtime.newobjectèð runtime.duffzeroÎ4runtime.writebarrierstringüBgo.itab.*"".NoSuchContainer.errorØ0type.*"".NoSuchContainerîtype.error†Bgo.itab.*"".NoSuchContainer.errorš runtime.typ2Itabútype.string¦runtime.convT2E2runtime.writebarrierifacežBgo.string."/containers/%s/export"’fmt.SprintfÄÜ runtime.duffzero¶ go.string."GET"² +„ runtime.duffcopy¼ +&"".(*Client).streamp°"".autotmp_0912*type."".streamOptions"".autotmp_0911ÿ"type.interface {}"".autotmp_0909¿&type.[]interface {}"".autotmp_0907¯0type.*"".NoSuchContainer"".autotmp_0904ß(type.[1]interface {}"".autotmp_09030type.*"".NoSuchContainer "".urlŸtype.string "".~r1Ptype.error"".optsgo.string."No such container: "ª*runtime.concatstring20€ 
"".~r0type.string "".err0type.*"".NoSuchContainer€R€DÀ”15S +RnTgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþF"".(*ContainerAlreadyRunning).Error€úeH‹ %H;awèëêHƒì0HÇD$@HÇD$HHH,$H‰ïH‰ÞH¥H¥H‹|$8Hƒÿt-H/H|$H‰îH¥H¥èH‹\$ H‰\$@H‹\$(H‰\$HHƒÄ0ÉëÏ + 0runtime.morestack_noctxt^Ngo.string."Container already running: "¸*runtime.concatstring20` "".~r0type.string "".err@type.*"".ContainerAlreadyRunning`^_`€ ®,T +[%Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ>"".(*ContainerNotRunning).Error€úeH‹ %H;awèëêHƒì0HÇD$@HÇD$HHH,$H‰ïH‰ÞH¥H¥H‹|$8Hƒÿt-H/H|$H‰îH¥H¥èH‹\$ H‰\$@H‹\$(H‰\$HHƒÄ0ÉëÏ + 0runtime.morestack_noctxt^Fgo.string."Container not running: "¸*runtime.concatstring20` "".~r0type.string "".err8type.*"".ContainerNotRunning`^_`€ Â,T +[%Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".(*Env).GetÀ®eH‹ %H;awèëêHƒì8HÇD$XHÇD$`H‹\$@H‰$èH‹D$H‹T$HH‹L$PHH‰$H‰D$H‰T$(H‰T$H‰L$0H‰L$èH‹\$ HƒûtH‹ H‹kH‰L$XH‰l$`HƒÄ8Éëæ + + 0runtime.morestack_noctxtl"".(*Env).Map˜,type.map[string]stringÜ4runtime.mapaccess1_faststrPp +"".autotmp_0921type.string"".autotmp_0920type.string"".value0type.string "".keytype.string "".envtype.*"".Envpxop   +(,t +5kTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ "".(*Env).Exists€úeH‹ %H;awèëêHƒì@H‹\$HH‰$èH‹D$H‹T$PH‹L$XHH‰$H‰D$H‰T$0H‰T$H‰L$8H‰L$èH‹L$ ¶\$(Hƒùt ˆ\$`HƒÄ@Éëó + + 
0runtime.morestack_noctxtH"".(*Env).Mapt,type.map[string]string¸4runtime.mapaccess2_faststr@€"".autotmp_0923type.string "".~r10type.bool "".keytype.string "".envtype.*"".Env€^€€4V  +#]Tgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).GetBool eH‹ %H;awèëêHƒì`H‹\$hH‰$H‹\$pH‰\$H‹\$xH‰\$èH‹L$H‹D$ H‰L$PH‰ $H‰D$XH‰D$HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‰L$@H‰ $H‰D$HH‰D$èH‹L$H‹D$H‰L$0Hƒøt@HƒøuGH‹t$0H‰4$H‰D$8H‰D$H5LD$L‰ÇH¥H¥èH‹D$8¶\$ €ût Æ„$€HƒÄ`ÃHƒøu:H‹t$0H‰4$H‰D$8H‰D$H5LD$L‰ÇH¥H¥èH‹D$8¶\$ €ûu³Hƒøu>H‹t$0H‰4$H‰D$8H‰D$H5LD$L‰ÇH¥H¥èH‹D$8¶\$ €û…oÿÿÿHƒøu9H‹t$0H‰4$H‰D$8H‰D$H5LD$L‰ÇH¥H¥è¶\$ €û…0ÿÿÿÆ„$€HƒÄ`à + 0runtime.morestack_noctxtp"".(*Env).Get¸go.string." \t"àstrings.Trim¤strings.ToLowerŽgo.string."0"° runtime.eqstring¨go.string."no"Ê runtime.eqstring¨"go.string."false"Ê runtime.eqstring° go.string."none"Ò runtime.eqstring@À "".autotmp_0926?type.string"".autotmp_0925type.string"".s_type.string"".value0type.bool "".keytype.string "".envtype.*"".Env"ÀÝ¿ÀÏ¿ ÐD‹F Ã7 Ñ(Tgclocals·9ff42bf311af152488d11f0f78c8d5ceTgclocals·23c4785fa8abd7e258acfe91c9f325f3æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).SetBool †eH‹ %HD$°H;AwèëåHìÐH‹”$ØH‹Œ$àH‹„$耼$ð„¼H‰T$HH‰ÎH‰ÂHH‹ H‹CH‰t$pH‰4$H‰T$xH‰T$HHl$H‰ïH‰ÞH¥H¥H‰L$`H‰L$ H‰D$hH‰D$(èH‹\$0H‰œ$H‹\$8H‰œ$˜H‹\$HHƒû„1H‹H‹KH‹[H‰”$¸H‰Œ$ÀH‰œ$ÈH‰ØH)ËHƒû}OHH‰$H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$¨H‰„$°H‰ÓH‰”$ H‰ÍHkíHëH‰$H‹œ$H‰\$H‹œ$˜H‰\$èH‹”$ H‹Œ$¨H‹„$°H‹\$HH‰$Hƒ<$t4H‰”$¸H‰T$H‰Œ$ÀH‰L$H‰„$ÈH‰D$èHÄÐÉ%ëÉéÈþÿÿH‰T$@H‰ÎH‰ÂHH‹ H‹CH‰´$€H‰4$H‰”$ˆH‰T$HHl$H‰ïH‰ÞH¥H¥H‰L$PH‰L$ H‰D$XH‰D$(èH‹\$0H‰œ$H‹\$8H‰œ$˜H‹\$@Hƒû„.H‹H‹KH‹[H‰”$ H‰Œ$¨H‰œ$°H‰ØH)ËHƒû}OHH‰$H‰”$¸H‰T$H‰Œ$ÀH‰L$H‰„$ÈH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰ÓH‰¬$ÀH‰„$ÈH‰”$¸H‰ÍHkíHëH‰$H‹œ$H‰\$H‹œ$˜H‰\$èH‹”$¸H‹Œ$ÀH‹„$ÈH‹\$@H‰$Hƒ<$t1H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$èé9þÿÿ‰%ëƉéËþÿÿ 
+*0runtime.morestack_noctxt¬go.string."1"îgo.string."="¾*runtime.concatstring3ütype."".Envî"runtime.growslice¨4runtime.writebarrierstringÐ2runtime.writebarrierslice¤go.string."0"ògo.string."=" *runtime.concatstring3€ type."".Envò "runtime.growslice¬ 4runtime.writebarrierstringÔ2runtime.writebarrierslice@ *"".autotmp_0943type.uint64"".autotmp_0942type.uint64"".autotmp_0941type.int"".autotmp_0940type.int"".autotmp_0939type."".Env"".autotmp_0938type."".Env"".autotmp_0937type.string"".autotmp_0932_type."".Env"".autotmp_0931/type."".Env"".autotmp_0930type.string"".autotmp_0929type."".Env"".autotmp_0928type."".Env"".valueÿtype.string "".keyŸtype.string "".envŸtype.*"".Env"".valueßtype.string "".key¿type.string "".envtype.*"".Env"".value0type.bool "".keytype.string "".envtype.*"".Env" ÑŸ ÜÐ&V:¤ª,ž˜]Ty˜]T 'Tgclocals·f774b632f7ff7d029527413a83030842Tgclocals·2d894b3b66dff3ff7aaa2a78013804f9æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ "".(*Env).GetInt –eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‰\$H‹\$8H‰\$èH‹\$H‰\$@HƒÄ à + 0runtime.morestack_noctxtp$"".(*Env).GetInt64@@ "".~r10type.int "".keytype.string "".envtype.*"".Env@0?P +l6 +7Tgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·3280bececceccd33cb74587feedb1f9fæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ "".(*Env).SetIntàÊeH‹ %HD$ØH;AwèëåHì¨H‹œ$°H‰\$@H‹œ$ÈH‰$èH‹L$H‹D$H‹¼$¸H‹”$ÀH‰|$XH‰<$H‰T$`H‰T$HH|$H‰ÞH¥H¥H‰L$HH‰L$ H‰D$PH‰D$(èH‹\$0H‰\$hH‹\$8H‰\$pH‹\$@Hƒû„"H‹H‹KH‹[H‰”$H‰Œ$˜H‰œ$ H‰ØH)ËHƒû}LHH‰$H‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$€H‰„$ˆH‰ÓH‰T$xH‰ÍHkíHëH‰$H‹\$hH‰\$H‹\$pH‰\$èH‹T$xH‹Œ$€H‹„$ˆH‹\$@H‰$Hƒ<$t4H‰”$H‰T$H‰Œ$˜H‰L$H‰„$ H‰D$èHĨÉ%ëÉé×þÿÿ 
+*0runtime.morestack_noctxtxstrconv.Itoaàgo.string."="ª*runtime.concatstring3Ütype."".EnvÈ"runtime.growsliceð4runtime.writebarrierstring’2runtime.writebarrierslice@Ð"".autotmp_0955_type."".Env"".autotmp_0954/type."".Env"".autotmp_0953type.string"".autotmp_0952type."".Env"".autotmp_0951type.string"".value¿type.string "".keyŸtype.string "".envÏtype.*"".Env"".value0type.int "".keytype.string "".envtype.*"".Env"вÏÐðv"«;èTQ(Tgclocals·2cda55eacf8f3a391cf15caecdfeef06Tgclocals·6fac742cdcfec8bff38f6662e683bbdaæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ$"".(*Env).GetInt64ÀªeH‹ %H;awèëêHƒìXH‹\$`H‰$H‹\$hH‰\$H‹\$pH‰\$èH‹L$H‹D$ H‰L$HH‰ $H‰D$PH‰D$HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‰L$8H‰ $H‰D$@H‰D$HÇD$ +HÇD$@èH‹L$ H‹D$(H‹\$0HƒøtHÇD$xÿÿÿÿHƒÄXÃH‰L$xHƒÄXà + 0runtime.morestack_noctxtp"".(*Env).Get¸go.string." \t"àstrings.TrimÈ strconv.ParseInt@° +"".autotmp_0963type.string"".s?type.string "".~r10type.int64 "".keytype.string "".envtype.*"".Env °°¯° ¯ à„d9 7©Tgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·44568aa369055d8938d809aa5d80843bæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ$"".(*Env).SetInt64àÜeH‹ %HD$ØH;AwèëåHì¨H‹œ$°H‰\$@H‹œ$ÈH‰$HÇD$ +èH‹L$H‹D$H‹¼$¸H‹”$ÀH‰|$XH‰<$H‰T$`H‰T$HH|$H‰ÞH¥H¥H‰L$HH‰L$ H‰D$PH‰D$(èH‹\$0H‰\$hH‹\$8H‰\$pH‹\$@Hƒû„"H‹H‹KH‹[H‰”$H‰Œ$˜H‰œ$ H‰ØH)ËHƒû}LHH‰$H‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$€H‰„$ˆH‰ÓH‰T$xH‰ÍHkíHëH‰$H‹\$hH‰\$H‹\$pH‰\$èH‹T$xH‹Œ$€H‹„$ˆH‹\$@H‰$Hƒ<$t4H‰”$H‰T$H‰Œ$˜H‰L$H‰„$ H‰D$èHĨÉ%ëÉé×þÿÿ +*0runtime.morestack_noctxtŠ"strconv.FormatIntògo.string."="¼*runtime.concatstring3îtype."".EnvÚ"runtime.growslice‚4runtime.writebarrierstring¤2runtime.writebarrierslice@Ð"".autotmp_0969_type."".Env"".autotmp_0968/type."".Env"".autotmp_0967type.string"".autotmp_0966type."".Env"".autotmp_0965type.string"".value¿type.string "".keyŸtype.string "".envÏtype.*"".Env"".value0type.int64 "".keytype.string 
"".envtype.*"".Env"лÏÐð˜"´DèTQTgclocals·2cda55eacf8f3a391cf15caecdfeef06Tgclocals·6fac742cdcfec8bff38f6662e683bbdaæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).GetJSONÀºeH‹ %H;awèëêHƒìHHÇD$xHÇ„$€H‹\$PH‰$H‹\$XH‰\$H‹\$`H‰\$èH‹L$H‹D$ HƒøuHÇD$xHÇ„$€HƒÄHÃH‰L$8H‰ $H‰D$@H‰D$èH\$H,$H‰ïH‰ÞH¥H¥H¥H‹\$hH‰\$H‹\$pH‰\$ èH‹L$(H‹D$0H‰L$xH‰„$€HƒÄHà + + 0runtime.morestack_noctxtš"".(*Env).Getž2runtime.stringtoslicebyteú.encoding/json.Unmarshalp +"".svaltype.string "".~r2Ptype.error"".iface0"type.interface {} "".keytype.string "".envtype.*"".Env`aà¦/,e L”Tgclocals·528c559c9193f2a671691be2686ab724Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).SetJSONà +Ô +eH‹ %HD$°H;AwèëåHìÐHÇ„$HÇ„$H‹œ$ðH‰$H‹œ$øH‰\$èH‹t$H‹l$H‹T$ H‹D$(H‹L$0H‰L$pHƒøH‰D$htH‰„$H‰Œ$HÄÐÃH‹œ$ØH‰\$@H‰´$ˆH‰4$H‰¬$H‰l$H‰”$˜H‰T$èH‹L$H‹D$ H‹¼$àH‹”$èH‰L$xH‰„$€H‰|$XH‰<$H‰T$`H‰T$HH|$H‰ÞH¥H¥H‰L$HH‰L$ H‰D$PH‰D$(èH‹\$0H‰\$xH‹\$8H‰œ$€H‹\$@Hƒû„FH‹H‹KH‹[H‰”$¸H‰Œ$ÀH‰œ$ÈH‰ØH)ËHƒû}OHH‰$H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$¨H‰„$°H‰ÓH‰”$ H‰ÍHkíHëH‰$H‹\$xH‰\$H‹œ$€H‰\$èH‹”$ H‹Œ$¨H‹„$°H‹\$@H‰$Hƒ<$tLH‰”$¸H‰T$H‰Œ$ÀH‰L$H‰„$ÈH‰D$èHÇ„$HÇ„$HÄÐÉ%뫉é³þÿÿ +*0runtime.morestack_noctxt¨*encoding/json.Marshalš2runtime.slicebytetostringœgo.string."="æ*runtime.concatstring3žtype."".Env"runtime.growsliceÄ4runtime.writebarrierstringì 2runtime.writebarrierslicep "".autotmp_0983_type."".Env"".autotmp_0982/type."".Env"".autotmp_0981type.string"".autotmp_0980¯type.string"".autotmp_0979type."".Env"".valuetype.string "".keyïtype.string "".envŸtype.*"".Env "".errÏtype.error"".svaltype.[]uint8 "".~r2Ptype.error"".value0"type.interface {} "".keytype.string "".envtype.*"".Env&" vŸ €Ÿ °"º:< á 
SyûZT;Tgclocals·bb06efbb6a26e0f286c10766fad350d7Tgclocals·299a4d24490b926d38628658bb77eeb1æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).GetList€ +ì eH‹ %HD$ÐH;AwèëåHì°HÇ„$ÐHÇ„$ØHÇ„$àH‹œ$¸H‰$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹L$H‹D$ H‰L$PH‰D$XHƒøu,HÇ„$ÐHÇ„$ØHÇ„$àHÄ°ÃHH‰$èH‹D$H‰D$HH‰D$@H‹\$PH‰$H‹\$XH‰\$èH\$H,$H‰ïH‰ÞH¥H¥H¥H‹L$@HH‰D$pH‰D$H‰L$xH‰L$ èH‹L$(H‹D$0H‰D$hHƒùH‰L$`„H‹\$HH‹H‹KH‹[H‰ØH)ËHƒû}OHH‰$H‰”$€H‰T$H‰Œ$ˆH‰L$H‰„$H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ˆH‰„$H‰ÓH‰”$€H‰ÍHkíHëH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹”$€H‹Œ$ˆH‹„$H‹\$HH‰$H‰”$˜H‰T$H‰Œ$ H‰L$H‰„$¨H‰D$èH‹\$HH‹+H‰¬$ÐH‹kH‰¬$ØH‹kH‰¬$àHÄ°Ã +*0runtime.morestack_noctxtÚ"".(*Env).Getôtype.[]string†"runtime.newobjectÔ2runtime.stringtoslicebyte–type.*[]stringÈ.encoding/json.UnmarshalÊtype.[]string¼"runtime.growsliceê4runtime.writebarrierstring„ 2runtime.writebarrierslice`à"".autotmp_0995_type.[]string"".autotmp_0994/type.[]string"".autotmp_0992ßtype.*[]string +"".&lÏtype.*[]string "".errŸtype.error"".sval¿type.string "".~r10type.[]string "".keytype.string "".envtype.*"".Env""à”ßà¾ß €&ÖF: ,z€: lV':ÑM?Tgclocals·f09ff24693e6d72e9e2f82319a6e45a0Tgclocals·80f0398afc092a879ad303c2fec80b66æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).SetListÀ¤eH‹ %H;awèëêHƒìPHÇ„$ˆHÇ„$H‹\$pH‰\$8H‹\$xH‰\$@H‹œ$€H‰\$HHH‰$H\$8H‰\$èH\$Hl$H‰ïH‰ÞýHƒÆHƒÇHÇÁóH¥üH‹\$XH‰$H‹\$`H‰\$H‹\$hH‰\$èH‹L$(H‹D$0H‰Œ$ˆH‰„$HƒÄPà + + 0runtime.morestack_noctxt¬type.[]stringÒruntime.convT2EÞ""".(*Env).SetJSON€  +"".autotmp_1005/type.[]string "".~r2`type.error"".value0type.[]string "".keytype.string "".envtype.*"".Env ·Ÿàò2® +hxTgclocals·ff7af1025fb7deae6ebf3487eab30c33Tgclocals·61e2515c69061b8fed0e66ece719f936æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ"".(*Env).SetÀ¬eH‹ %H;awèëêHì€H‹œ$H‰$H‹´$˜H‰t$H5Hl$H‰ïH¥H¥H‹œ$ H‰\$ 
H‹œ$¨H‰\$(èH‹\$0H‰\$@H‹\$8H‰\$HH‹œ$ˆHƒû„H‹H‹KH‹[H‰T$hH‰L$pH‰\$xH‰ØH)ËHƒû}FHH‰$H‰T$PH‰T$H‰L$XH‰L$H‰D$`H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$XH‰D$`H‰ÓH‰T$PH‰ÍHkíHëH‰$H‹\$@H‰\$H‹\$HH‰\$èH‹T$PH‹L$XH‹D$`H‹œ$ˆH‰$Hƒ<$t+H‰T$hH‰T$H‰L$pH‰L$H‰D$xH‰D$èHĀÉ%ë̉éøþÿÿ + 0runtime.morestack_noctxtrgo.string."="È*runtime.concatstring3îtype."".EnvÎ"runtime.growsliceê4runtime.writebarrierstringô2runtime.writebarriersliceP€"".autotmp_1010_type."".Env"".autotmp_1009/type."".Env"".autotmp_1008type.string"".autotmp_1007type."".Env"".value0type.string "".keytype.string "".envtype.*"".Env€èÿ€ üácƒNE'Tgclocals·46b690808f7e1a8626f300054e53774fTgclocals·f9166171185d1f1926264897a0c959c1æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ "".(*Env).Decode  +ˆ +eH‹ %HD$¨H;AwèëåHìØHÇ„$øHÇ„$HH‰$èH‹\$H‰\$0HH‰$HÇD$èH‹D$H‹\$0H‰$H‰D$èH‹œ$èH‰\$HH‹œ$ðH‰\$PHH‰$èH‹L$H‰ÏHƒù„¼1ÀèH‰L$(H‰ $Hƒ<$„•H‹\$HH‰\$H‹\$PH‰\$èH‹L$(H‹D$0H‰ $H‰ÁHH‰D$xH‰D$H‰Œ$€H‰L$èH‹D$H‹L$ H‰L$pHƒøH‰D$htH‰„$øH‰Œ$HÄØÃH‹\$0H‹+H¼$ˆ1ÀèHH‰$H‰l$Hœ$ˆH‰\$èH‹œ$ˆ1íH9ë„œH‹œ$Hƒû„±H‹ H‹{H‹œ$ˆHƒû„‘H‹3H‹kH‰L$xH‰¼$€H‹œ$àH‰$H‰t$XH‰t$H‰l$`H‰l$H‰L$8H‰L$H‰|$@H‰|$ èHœ$ˆH‰$èH‹œ$ˆ1íH9ë…dÿÿÿHÇ„$øHÇ„$HÄØÉéhÿÿÿ‰éHÿÿÿ‰%é_þÿÿ‰é=þÿÿ$ +*0runtime.morestack_noctxtz8type.map[string]interface {}Œ"runtime.newobject®8type.map[string]interface {}Òruntime.makemap‚.runtime.writebarrierptrÄ4type.encoding/json.DecoderÖ"runtime.newobjectˆÄ runtime.duffzeroâ2runtime.writebarrieriface’:type.*map[string]interface {}Ê>encoding/json.(*Decoder).DecodeÜØ runtime.duffzeroê8type.map[string]interface {} &runtime.mapiterinit¶""".(*Env).SetAutoØ&runtime.mapiternextP°"".autotmp_1025"type.interface {}"".autotmp_1024ß6type.*encoding/json.Decoder"".autotmp_10236type.*encoding/json.Decoder"".autotmp_1022ŸBtype.map.iter[string]interface {}"".autotmp_10218type.map[string]interface {} +"".&mÏ:type.*map[string]interface {}$encoding/json.r·2Ÿtype.io.Reader"".v¿"type.interface {}"".kÿtype.string "".errßtype.error "".~r10type.error 
"".srctype.io.Reader "".envtype.*"".Env("°¸¯°‡¯°-0Œ:K¾‹9$  E#BF4kTgclocals·784852ecd61fa458e8af6c57e3ee02b8Tgclocals·8f12e5afe7e149987419843938d69919æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ""".(*Env).SetAuto ŒeH‹ %H„$(ÿÿÿH;AwèëâHìXHH‰$H‹œ$xH‰\$H‹œ$€H‰\$èòD$¶\$ €ût=H‹œ$`H‰$H‹œ$hH‰\$H‹œ$pH‰\$òH,ØH‰\$èHÄXÃHH‰$H‹œ$xH‰\$H‹œ$€H‰\$èH‹L$H‰Œ$ˆH‹t$ H‰´$¶\$(€û„ÃH‹œ$`H‰\$@H‹¼$hH‹”$pH‰ðH‰¼$˜H‰<$H‰”$ H‰T$HH|$H‰ÞH¥H¥H‰L$XH‰L$ H‰D$`H‰D$(èH‹\$0H‰œ$ØH‹\$8H‰œ$àH‹\$@Hƒû„.H‹H‹KH‹[H‰”$@H‰Œ$HH‰œ$PH‰ØH)ËHƒû}OHH‰$H‰”$(H‰T$H‰Œ$0H‰L$H‰„$8H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$0H‰„$8H‰ÓH‰”$(H‰ÍHkíHëH‰$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹”$(H‹Œ$0H‹„$8H‹\$@H‰$Hƒ<$t1H‰”$@H‰T$H‰Œ$HH‰L$H‰„$PH‰D$èéóýÿÿ‰%ëƉéËþÿÿH‹œ$xH‰$H‹œ$€H‰\$èH‹´$`H‹l$H‰¬$øH‹T$H‰”$H‹L$ H‰Œ$H‹D$(H‹\$0H‰œ$ÐHƒøH‰„$È…åH‰t$HH‰,$H‰T$H‰L$èH‹L$H‹D$ H‹¼$hH‹”$pH‰Œ$ØH‰„$àH‰¼$¨H‰<$H‰”$°H‰T$HH|$H‰ÞH¥H¥H‰L$hH‰L$ H‰D$pH‰D$(èH‹\$0H‰œ$ØH‹\$8H‰œ$àH‹\$HHƒû„.H‹H‹KH‹[H‰”$(H‰Œ$0H‰œ$8H‰ØH)ËHƒû}OHH‰$H‰”$@H‰T$H‰Œ$HH‰L$H‰„$PH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰ÓH‰¬$HH‰„$PH‰”$@H‰ÍHkíHëH‰$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹”$@H‹Œ$HH‹„$PH‹\$HH‰$Hƒ<$t1H‰”$(H‰T$H‰Œ$0H‰L$H‰„$8H‰D$èéûÿÿ‰%ëƉéËþÿÿH‰t$PHœ$èHÇHÇCHœ$èHƒû„JHÇÁHÇÂH‰Œ$H‰”$ H‰œ$H‰$H‹œ$xH‰\$H‹œ$€H‰\$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$ H‰\$ èH‹L$(H‹D$0H‹¼$hH‹”$pH‰¼$¸H‰<$H‰”$ÀH‰T$HH|$H‰ÞH¥H¥H‰L$xH‰L$ H‰„$€H‰D$(èH‹\$0H‰œ$ØH‹\$8H‰œ$àH‹\$PHƒû„.H‹H‹KH‹[H‰”$(H‰Œ$0H‰œ$8H‰ØH)ËHƒû}OHH‰$H‰”$@H‰T$H‰Œ$HH‰L$H‰„$PH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰ÓH‰´$HH‰„$PH‰”$@H‰ÍHkíHëH‰$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹”$@H‹Œ$HH‹„$PH‹\$PH‰$Hƒ<$t1H‰”$(H‰T$H‰Œ$0H‰L$H‰„$8H‰D$èé%ùÿÿ‰%ëƉéËþÿÿ‰é¯ýÿÿ< +00runtime.morestack_noctxtPtype.float64–$runtime.assertE2T2 $"".(*Env).SetInt64¾type.string„$runtime.assertE2T2Ôgo.string."="ž*runtime.concatstring3Ütype."".EnvÎ"runtime.growsliceˆ 4runtime.writebarrierstring° +2runtime.writebarrierslice– *encoding/json.Marshalì 
2runtime.slicebytetostring€go.string."="Ê*runtime.concatstring3ˆtype."".Envú"runtime.growslice´4runtime.writebarrierstringÜ2runtime.writebarriersliceô2runtime.writebarrieriface‚go.string."%v"öfmt.Sprintfêgo.string."="º*runtime.concatstring3øtype."".Envê"runtime.growslice¤4runtime.writebarrierstringÌ2runtime.writebarriersliceP°N"".autotmp_1056type.uint64"".autotmp_1055type.uint64"".autotmp_1054type.int"".autotmp_1053type.int"".autotmp_1052type."".Env"".autotmp_1051type."".Env"".autotmp_1050type.string"".autotmp_1048&type.[]interface {}"".autotmp_1047type.uint64"".autotmp_1046type.uint64"".autotmp_1045type.int"".autotmp_1044type.int"".autotmp_1043type."".Env"".autotmp_1042type."".Env"".autotmp_1041type.string"".autotmp_1040type.string"".autotmp_1035_type."".Env"".autotmp_1034/type."".Env"".autotmp_1033ÿtype.string"".autotmp_1032type."".Env"".autotmp_1031type.string"".autotmp_1030ß(type.[1]interface {}"".autotmp_1029type."".Env"".autotmp_1028type."".Env"".value¿type.string "".key¿type.string "".envtype.*"".Env"".valueßtype.string "".keyßtype.string "".envŸtype.*"".Env"".valueÿtype.string "".keyÿtype.string "".env¯type.*"".Env "".errŸtype.error "".val¿type.[]uint8"".svalŸtype.string"".value0"type.interface {} "".keytype.string "".envtype.*"".Env%°v¯°ô F¤%:5 R®  qÐã!NJ„˜]Tž‡]T ŒAb˜]T+Tgclocals·f2bff8318847e30874c64d3cd9d3a459Tgclocals·7c09a673592d13ccf4305e509b0c4fdfæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ"".(*Env).Map€þeH‹ %HD$ÐH;AwèëåHì°H‹œ$¸H‹kHƒýuHÇ„$ÀHÄ°ÃHH‰$HÇD$èH‹\$H‰\$PH‹œ$¸Hƒû„;H‹H‹CH‹kH‰¬$¨1ÉH‰„$ H‰D$@H‰”$˜H‰ÐH‹l$@H9éÖH‰D$XHƒø„êH‹H‹xH‰L$HH‰T$pH‰|$xH‰T$`H‰$H‰|$hH‰|$HH|$H‰ÞH¥H¥HÇD$ èH‹L$(H‹D$0H‹T$8H‰”$HH‰$H‹\$PH‰\$HƒøvgH‰L$H‰ËH‰Œ$€HƒøH‰„$ˆvBHƒÃH‰\$èH‹D$XH‹L$HHƒÀHÿÁH‹l$@H9éŒ*ÿÿÿH‹\$PH‰œ$ÀHÄ°Ãè è ‰éÿÿÿ‰é¾þÿÿ +*0runtime.morestack_noctxt–,type.map[string]stringºruntime.makemapægo.string."="šstrings.SplitNÖ,type.map[string]stringÖ$runtime.mapassign1È$runtime.panicindexÖ$runtime.panicindex 
à"".autotmp_1076type.string"".autotmp_1075¯type.*string"".autotmp_1074ßtype.int"".autotmp_1073type.int"".autotmp_1071/type."".Env"".autotmp_1069Ïtype.int"".parts_type.[]string +"".kvŸtype.string"".m¿,type.map[string]string "".~r0,type.map[string]string "".envtype.*"".Env&"à%ßàÚßàÀ0¾"#rKG\°—Tgclocals·2148c3737b2bb476685a1100a2e8343eTgclocals·f3e8856499aee240134cb47f88c6cd55æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.goþ:"".(*Client).AddEventListener€äeH‹ %H;awèëêHƒì0HÇD$HHÇD$PHÇD$ HÇD$(H‹\$8H‹k0H‰,$èH‹T$8¶\$€û…H‹j0H‰,$H‰T$èH‹T$8H‹L$H‹D$H‰D$(HƒùH‰L$ tH‰L$HH‰D$PHƒÄ0ÃH‹j0H‰,$H‹\$@H‰\$èH‹D$H‹L$HƒøtH‰D$HH‰L$PHƒÄ0ÃHÇD$HHÇD$PHƒÄ0Ãë± + + 0runtime.morestack_noctxt˜H"".(*eventMonitoringState).isEnabledâ`"".(*eventMonitoringState).enableEventMonitoringìL"".(*eventMonitoringState).addListener@` +"".autotmp_1079type.error "".errtype.error "".~r1 type.error"".listener2type.chan<- *"".APIEvents"".ctype.*"".Client$`ˆ_`5_`_`€0€,%& ! KµTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ@"".(*Client).RemoveEventListeneràÔeH‹ %H;awèëêHƒì0HÇD$HHÇD$PH‹\$8H‹k0H‰,$H‹\$@H‰\$èH‹T$8H‹D$H‹L$H‰L$(HƒøH‰D$ tH‰D$HH‰L$PHƒÄ0ÃH‹j0H‹]XHƒûu$H‹j0H‰,$èHÇD$HHÇD$PHƒÄ0Ãëç + 0runtime.morestack_noctxtˆR"".(*eventMonitoringState).removeListeneršb"".(*eventMonitoringState).disableEventMonitoring@` "".errtype.error "".~r1 type.error"".listener.type.chan *"".APIEvents"".ctype.*"".Client`[_`1_`°  ,0   +CmTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþL"".(*eventMonitoringState).addListenerÀ À eH‹ %H;awèëêHƒìpHÇ„$ˆHÇ„$H‹\$xH‰$Hƒ<$„èH‹\$xH‰$Hƒ<$„éH 
QjèYYH…À…ÄH‹œ$€H‰$H‹\$xH‰\$Hƒ|$„–HƒD$Pè¶\$€ût)H‹H‰œ$ˆH‹H‰œ$èHƒÄpÃH‹\$xH‰$Hƒ<$„8Hƒ$HÇD$èH‹\$xHƒû„H‹sPH‹SXH‹K`H‰t$XH‰T$`H‰L$hH‰ËH)ÓHƒû}FHH‰$H‰t$@H‰t$H‰T$HH‰T$H‰L$PH‰L$HÇD$ èH‹t$(H‹T$0H‹L$8H‰ÕHÿÅH‰l$HH‰L$PH‰t$@HÖH‰$H‹œ$€H‰\$èH‹l$@H‹T$HH‹L$PH‹\$xH‰$Hƒ<$tKHƒ$PH‰l$XH‰l$H‰T$`H‰T$H‰L$hH‰L$èHÇ„$ˆHÇ„$èHƒÄpÉ%묉éêþÿÿ‰%é¼þÿÿ‰%é^þÿÿèHƒÄpÉ%é þÿÿ‰%éæýÿÿ + 0runtime.morestack_noctxtŽ(sync.(*RWMutex).LockÄ2sync.(*RWMutex).Unlock·fÔ"runtime.deferprocÄ""".listenerExistsæ6"".ErrListenerAlreadyExists„6"".ErrListenerAlreadyExists &runtime.deferreturnø*sync.(*WaitGroup).Addò6type.[]chan<- *"".APIEventsÒ"runtime.growsliceÎ.runtime.writebarrierptrÜ2runtime.writebarrierslice˜&runtime.deferreturnþ&runtime.deferreturn@à "".autotmp_1085_6type.[]chan<- *"".APIEvents"".autotmp_1084/6type.[]chan<- *"".APIEvents"".autotmp_10836type.[]chan<- *"".APIEvents "".~r1 type.error"".listener2type.chan<- *"".APIEvents"".eventState:type.*"".eventMonitoringStateBàMhßà»ßà2ßàà:¶2.7)'ò#   $F[--«GVTgclocals·4ab27d0e7d4f80bb5765ef5f61de5fe5Tgclocals·551282070bdf4bca9f3b8ada2a8f2d2aê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþR"".(*eventMonitoringState).removeListenerà  eH‹ %HD$ØH;AwèëåHì¨HÇ„$ÀHÇ„$ÈH‹œ$°H‰$Hƒ<$„„èH‹œ$°H‰$Hƒ<$„\H QjèYYH…À…4H‹œ$¸H‰$H‹œ$°H‰\$Hƒ|$„HƒD$Pè¶\$€û„èH‹œ$°1Ò1ÉE1ÀHƒû„ÈH‹sPH‹CXH‹k`H‰¬$ 1ÿH‰„$˜H‰D$@H‰´$H‹l$@H9ïóH‰t$XL‹H‰|$HL‰L$PH‹¬$¸I9é„»H‰T$`H‰L$hL‰D$pL‰ÀL‰ÃH)ËHƒû}QHH‰$H‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$HÇD$ èL‹L$PH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$€H‰„$ˆH‰T$xHÊH‰$L‰L$èH‹|$HH‹t$XH‹T$xH‹Œ$€L‹„$ˆHƒÆHÿÇH‹l$@H9ïŒ ÿÿÿH‹œ$°H‰$Hƒ<$t}Hƒ$PH‰T$`H‰T$H‰L$hH‰L$L‰D$pL‰D$èH‹œ$°H‰$Hƒ<$t9Hƒ$HÇD$ÿÿÿÿèHÇ„$ÀHÇ„$ÈèHĨÉ%뾉%éwÿÿÿ‰é1þÿÿ뼉%éñýÿÿèHĨÉ%é˜ýÿÿ‰%épýÿÿ +*0runtime.morestack_noctxt¤(sync.(*RWMutex).Lockà2sync.(*RWMutex).Unlock·fð"runtime.deferprocæ""".listenerExists¶6type.[]chan<- *"".APIEvents¢"runtime.growslice¤.runtime.writebarrierptr‚ 2runtime.writebarriersliceÎ *sync.(*WaitGroup).AddŠ +&runtime.deferreturnú 
+&runtime.deferreturn@Ð"".autotmp_1100_6type.[]chan<- *"".APIEvents"".autotmp_1098Ÿ4type.*chan<- *"".APIEvents"".autotmp_1097Ïtype.int"".autotmp_1096¿type.int"".autotmp_1094/6type.[]chan<- *"".APIEvents"".l¯2type.chan<- *"".APIEvents"".newListeners6type.[]chan<- *"".APIEvents "".~r1 type.error"".listener2type.chan<- *"".APIEvents"".eventState:type.*"".eventMonitoringState6"ÐS’ÏÐ7ÏÐ'ðVÌ:1>P» +;&&    QaÞAoCmTgclocals·00180cfd7eeeff04c22905d29bdac052Tgclocals·158185e77a15ce9170c1aa92e62cd73eê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþR"".(*eventMonitoringState).closeListenersàØeH‹ %H;awèëêHƒì@H‹\$HHƒû„¼H‹SPH‹CXH‹k`H‰l$81ÉH‰D$0H‰D$H‰T$(H‰ÐH‹l$H9é}TH‰D$ H‹(H‰L$H‰,$èH‹\$HH‰$Hƒ<$t^Hƒ$HÇD$ÿÿÿÿèH‹D$ H‹L$HƒÀHÿÁH‹l$H9é|¬H‹\$HHƒût!HkPHÇEHÇEHÇEHƒÄ@ÉëÛ‰%뙉é=ÿÿÿ + 0runtime.morestack_noctxtÔ"runtime.closechanš*sync.(*WaitGroup).Add€ +"".autotmp_1111?4type.*chan<- *"".APIEvents"".autotmp_1110_type.int"".autotmp_1109Otype.int"".autotmp_1108/6type.[]chan<- *"".APIEvents"".eventState:type.*"".eventMonitoringState€½€ð,ìK #'   i‡Tgclocals·ac5bea9c8a91f5fb1d31bdacc5067b57Tgclocals·29f0050a5ee7c2b9348a75428171d7deê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ""".listenerExists ŒH‹|$H‹\$Hƒût2H‹H‹sH‹k1ÉH9ñ}H‹H9ûuÆD$ÃHƒÀHÿÁH9ñ|æÆD$ÉëÊ0 "".~r2 type.bool"".list8type.*[]chan<- *"".APIEvents"".a2type.chan<- *"".APIEventsPP ü  + Tgclocals·d3486bc7ce1948dc22d7ad1c0be2887aTgclocals·3280bececceccd33cb74587feedb1f9fê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ`"".(*eventMonitoringState).enableEventMonitoringÀ¤eH‹ %H;awèëêHƒìHÇD$0HÇD$8H‹\$ H‰$Hƒ<$„†èH‹\$ H‰$Hƒ<$„aH QjèH‹T$0YYH…À…7¶Z0€û…ìHÇÅ@ˆj0HH‰$èH‹L$HÇH‹\$ H‰$Hƒ<$„ãHƒ$8H‰L$èHH‰$HÇD$dèH‹L$H‹\$ H‰$Hƒ<$„–Hƒ$@H‰L$èHH‰$HÇD$èH‹L$H‹\$ H‰$Hƒ<$tPHƒ$HH‰L$èH‹\$ H‰$H‹\$(H‰\$H QjèYYHÇD$0HÇD$8èHƒÄÉ%막%é^ÿÿÿ‰%éÿÿÿèHƒÄÉ%é“þÿÿ‰%énþÿÿ$ + 
0runtime.morestack_noctxt‚(sync.(*RWMutex).Lock¸2sync.(*RWMutex).Unlock·fÈ"runtime.deferproc¦type.int64¸"runtime.newobject–.runtime.writebarrierptr¤.type.chan *"".APIEventsÈ runtime.makechan˜.runtime.writebarrierptr¦type.chan errorÊ runtime.makechan’.runtime.writebarrierptrÆV"".(*eventMonitoringState).monitorEvents·fÖruntime.newprocŠ&runtime.deferreturnâ&runtime.deferreturn@0 "".~r1 type.error"".ctype.*"".Client"".eventState:type.*"".eventMonitoringState<0G ù/0+/0&àDŽ,3  #A=$   @[è]Tgclocals·fa051c55663fc115869f36c85a0645b9Tgclocals·0115f8d53b75c1696444f08ad03251d9ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþb"".(*eventMonitoringState).disableEventMonitoringÀ®eH‹ %H;awèëêHƒìHÇD$HÇD$ H‹\$H‰$Hƒ<$„ËèH‹\$H‰$Hƒ<$„¦H QjèYYH…À…H‹\$H‰$èH‹\$H‰$Hƒ<$tZHƒ$èH‹L$¶Y0€ût%1í@ˆi0H‹i@H‰,$èH‹\$H‹kHH‰,$èHÇD$HÇD$ èHƒÄÉ%ëèHƒÄÉ%éNÿÿÿ‰%é)ÿÿÿ + 0runtime.morestack_noctxt‚(sync.(*RWMutex).Lock¸2sync.(*RWMutex).Unlock·fÈ"runtime.deferprocúR"".(*eventMonitoringState).closeListeners®,sync.(*WaitGroup).Waitð"runtime.closechan”"runtime.closechanÄ&runtime.deferreturnì&runtime.deferreturn0 "".~r0type.error"".eventState:type.*"".eventMonitoringState,G€! 8ª,.   
@<d@Tgclocals·a9282ac20787dc3025c0916068a42263Tgclocals·0115f8d53b75c1696444f08ad03251d9ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþP"".(*eventMonitoringState).monitorEventsàÒeH‹ %H„$(ÿÿÿH;AwèëâHìXHH‰$èH‹D$H‰D$`H‹¬$`H‰(HH‰$èH‹D$H‰D$hH‹¬$hH‰(HÇD$pHÇD$xH‹\$`H‹+H‰,$è¶\$€ûtHÇ$€–˜èëÖH‹\$`H‹+H‰,$H‹\$hH‹+H‰l$èH‹T$H‹L$H‰L$xHƒúH‰T$ptH‹\$`H‹+H‰,$èèHÄXÃH‹\$`H‹+H‰,$è¶\$€û„úHÇ$áõèH‹L$`H‹\$H‰\$0H‹H‹k@H‰l$XHÇD$PH‹H‹kHH‰l$HHÇ„$€HÇ„$ˆH¼$1ÀèHœ$H‰$HÇD$ÈÇD$èH¬$H‰,$H‹l$XH‰l$Hl$PH‰l$Hl$/H‰l$èH‹t$`¶\$ €û„…H‹T$P¶\$/€ûuèHÄXÃH‹-H9êuH‹.H‰,$èèHÄXÃH‹.H‰,$H‰T$8H‰T$èH‹\$`H‹+H‰,$H‹\$8H‰\$H QjèYYé”þÿÿH¬$H‰,$H‹l$HH‰l$H¬$€H‰l$è¶\$€û„2H‹”$€H‹Œ$ˆH‰L$xH‹-H9êuYH‰T$pH‰$H‰L$H‹-H‰l$H‹-H‰l$èH‹T$p¶\$ €ûtH‹\$`H‹+H‰,$èèHÄXÃHƒúH‰T$p„ÕýÿÿHH‰$èH‹L$H-H‰)H‰L$@H‰ $Hƒ<$tqHƒ$H‹\$`H‰\$èH‹\$@H‰$Hƒ<$tDHƒ$H‹\$hH‰\$èH‹\$@SjèYYH…ÀuèHÄXÃèHÄXÉ%볉%ë†H¬$H‰,$H‹l$0H‰l$HÇD$è¶\$€û…úüÿÿHœ$H‰$è èHÄXÃR +00runtime.morestack_noctxtP:type.*"".eventMonitoringStateb"runtime.newobjectštype.*"".Client¬"runtime.newobjectœL"".(*eventMonitoringState).noListenersÊtime.SleepŠV"".(*eventMonitoringState).connectWithRetryàb"".(*eventMonitoringState).disableEventMonitoringì&runtime.deferreturnžH"".(*eventMonitoringState).isEnabledÔtime.After‚œ runtime.duffzeroÆ"runtime.newselect¤&runtime.selectrecv2ô&runtime.deferreturn’"".EOFEvent´b"".(*eventMonitoringState).disableEventMonitoringÀ&runtime.deferreturnüR"".(*eventMonitoringState).updateLastSeen¶ N"".(*eventMonitoringState).sendEvent·fÆ runtime.newproc¤ +$runtime.selectrecvø +""".ErrNoListeners¬ """.ErrNoListenersÄ """.ErrNoListenersØ runtime.ifaceeq˜ b"".(*eventMonitoringState).disableEventMonitoring¤ &runtime.deferreturnà Žtype.struct { F uintptr; A0 **"".eventMonitoringState; A1 **"".Client }ò "runtime.newobjectŠ "".func·006Ø .runtime.writebarrierptr .runtime.writebarrierptrº"runtime.deferprocÔ&runtime.deferreturnð&runtime.deferreturnì$runtime.selectrecvª runtime.selectgoº&runtime.deferreturn °"".autotmp_1130¯type.*struct { F 
uintptr; A0 **"".eventMonitoringState; A1 **"".Client }"".autotmp_1129ìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }"".autotmp_1128¯type.error"".autotmp_1127Ÿtype.chan error"".autotmp_1126Ñtype.bool"".autotmp_1125$type.*"".APIEvents"".autotmp_1124ÿ.type.chan *"".APIEvents"".autotmp_1122type.bool +"".&cß type.**"".Client"".&eventStateïnet/http/httputil.NewClientConnžgo.string."GET"š&net/http.NewRequest˜2runtime.writebarrieriface¢Dnet/http/httputil.(*ClientConn).Do 2runtime.writebarrierifaceˆ¸type.struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }š"runtime.newobject²"".func·007Ž.runtime.writebarrierpträ.runtime.writebarrierptr´.runtime.writebarrierptr‚.runtime.writebarrierptrÂruntime.newprocîBgo.itab.*crypto/tls.Conn.net.Conn¦crypto/tls.DialÄ2runtime.writebarrierifaceÜ*type.*crypto/tls.Connòtype.net.ConnŠBgo.itab.*crypto/tls.Conn.net.Connž runtime.typ2ItabÊgo.string."tcp"`€2"".autotmp_1160¯ºtype.*struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }"".autotmp_1159type.error"".autotmp_1158type.error"".autotmp_1157type.error"".autotmp_1156Ÿtype.*uint8"".autotmp_1154ïtype.error"".autotmp_1153Ï"type.interface {}"".autotmp_1151/&type.[]interface {}"".autotmp_1149¯type.string"".autotmp_1148ïtype.int64"".autotmp_1147(type.[1]interface {}"".autotmp_1146otype.string"".&errChanÿ type.*chan error"".&eventChan0type.*chan *"".APIEvents +"".&cß type.**"".Client"".&errïtype.*error "".resß.type.*net/http.Response "".reqÏ,type.*net/http.Request"".conn¿Dtype.*net/http/httputil.ClientConn"".dialtype.net.Conn"".addressOtype.string"".protocol¯type.string "".uriÏtype.string "".~r3@type.error"".startTimetype.int64V%€µÿ€áÿ€ƒÿ€Ýÿ€‰Ð”ôº +Ð 1L‡'1Œ_4æ -K²7- h0(%ÞvB©–O\O?E?w+(' 
!²O-‚Tgclocals·8c02cd934f4d00aa05beba150d4d3e04Tgclocals·c1f40b05e3ffba0283c820006999a7cfê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ."".(*Client).CreateExecàÐeH‹ %H„$8ÿÿÿH;AwèëâHìHHÇ„$ HÇ„$¨Hœ$°HÇHÇCHœ$°Hƒû„»HÇÅHÇÂH‰œ$ðH‰¬$øH‰”$HH‰$Hœ$XH‰\$HƒD$ èH‹T$H‹D$H‹œ$ðH‰$H‰”$ H‰T$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ðH‰\$H‹œ$øH‰\$H‹œ$H‰\$ èH‹T$(H‹D$0H‰”$€H‰„$ˆHœ$XH¬$H‰ïH‰ÞèHœ$ØHÇHÇCHÇCHH‰$Hœ$H‰\$èH\$H¬$ØH‰ïH‰ÞH¥H¥H‹´$PH‰4$H5Hl$H‰ïH¥H¥H‹œ$€H‰\$H‹´$ˆH‰t$ H´$ØHl$(H‰ïH¥H¥H¥èH‹\$@H‰œ$ÀH‹\$HH‰œ$ÈH‹\$PH‰œ$ÐH‹L$XH‹D$`H‰„$H‹T$hH‰”$˜Hù”…ÜHH‰$èH‹L$H‰ÏHƒù„³1ÀèH‰L$pH‰ $Hƒ<$„ŒHœ$xHl$H‰ïH‰ÞH¥H¥èH‹\$pH‰\$pH‹1íH9èt)HÇ„$˜H‹\$pH‰œ$¨H‰„$ HÄHÃHH‰$HH‰\$HH‰\$èH‹D$먉%éhÿÿÿ‰éFÿÿÿHƒøt$HÇ„$˜H‰„$ H‰”$¨HÄHÃHH‰$èH‹D$H‰D$xH‹œ$ÀH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$H H‰Œ$ H‰L$H‰„$¨H‰D$ èH‹D$(H‹L$0Hƒøt$HÇ„$˜H‰„$ H‰Œ$¨HÄHÃH‹\$xH‰œ$˜HÇ„$ HÇ„$¨HÄHÉé>üÿÿ2 +00runtime.morestack_noctxtžtype.stringÖruntime.convT2EÀ2runtime.writebarrierifaceÎ>go.string."/containers/%s/exec"Âfmt.Sprintf¬  runtime.duffcopyø2type."".CreateExecOptions¤runtime.convT2Eø go.string."POST"ú"".(*Client).do® .type."".NoSuchContainerÀ "runtime.newobjectò ð runtime.duffzeroÒ +4runtime.writebarrierstringô +Bgo.itab.*"".NoSuchContainer.errorâ 0type.*"".NoSuchContainerø type.error Bgo.itab.*"".NoSuchContainer.error¤  runtime.typ2Itabº type."".ExecÌ "runtime.newobjectºtype.*"".Execø.encoding/json.UnmarshalÀ"".autotmp_1175¯0type.*"".NoSuchContainer"".autotmp_1174ß"type."".doOptions"".autotmp_1173Ï"type.interface {}"".autotmp_1171¯&type.[]interface {}"".autotmp_11680type.*"".NoSuchContainer"".autotmp_11672type."".CreateExecOptions"".autotmp_1165¯(type.[1]interface {}"".&execŸtype.*"".Exec "".errïtype.error"".bodytype.[]uint8"".pathtype.string "".~r2 type.error "".~r1type.*"".Exec"".opts2type."".CreateExecOptions"".ctype.*"".Client@%Èk™,°>º=‚ˆ Ü$[$-*ªvqkcI 
+½VuTgclocals·5a21f577b603cab6ea76228d95a69547Tgclocals·b0ce568b8ee350283c34690ddf2c6892è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.goþ,"".(*Client).StartExec€öeH‹ %H„$ØþÿÿH;AwèëâHì¨H‹„$ÀHÇ„$HÇ„$Hƒø…ÄHH‰$èH‹D$H‰„$ˆH‰$Hƒ<$„ŒH‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹œ$ˆH‰œ$ˆH‹1íH9èt H‹œ$ˆH‰œ$H‰„$HĨÃHH‰$HH‰\$HH‰\$èH‹D$뱉%éhÿÿÿH‹œ$¸H‰œ$ÀH‰„$ÈHœ$ÐHÇHÇCHœ$ÐHƒû„ HÇÅHÇÂH‰œ$øH‰¬$H‰”$HH‰$Hœ$ÀH‰\$èH‹T$H‹D$H‹œ$øH‰$H‰”$°H‰T$H‰„$¸H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$øH‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹T$(H‹D$0H‰”$H‰„$˜¶œ$È€û„ðHœ$ÈH¬$H‰ïH‰ÞèHœ$àHÇHÇCHÇCHH‰$Hœ$H‰\$èH\$H¬$àH‰ïH‰ÞH¥H¥H‹´$°H‰4$H5Hl$H‰ïH¥H¥H‹œ$H‰\$H‹´$˜H‰t$ H´$àHl$(H‰ïH¥H¥H¥èH‹L$XH‹D$`H‰„$ H‹T$hH‰”$¨Hù”…ÄHH‰$èH‹D$H‰„$ˆH‰$Hƒ<$„ŒH‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹œ$ˆH‰œ$ˆH‹1íH9èt H‹œ$ˆH‰œ$H‰„$HĨÃHH‰$HH‰\$HH‰\$èH‹D$뱉%éhÿÿÿHƒøtH‰„$H‰”$HĨÃHÇ„$HÇ„$HĨÃHœ$ÈH¬$H‰ïH‰ÞèHH¬$XH‰ïH‰ÞèH‹´$H‰´$X¶´$@ˆ´$`H´$ÐH¬$hH‰ïH¥H¥Hœ$àH¬$xH‰ïH‰ÞH¥H¥Hœ$ðH¬$ˆH‰ïH‰ÞH¥H¥HH‰$Hœ$H‰\$èH\$H¬$˜H‰ïH‰ÞH¥H¥H‹´$°H‰4$H5Hl$H‰ïH¥H¥H‹œ$H‰\$H‹´$˜H‰t$ H´$XHl$(H‰ïèèH‹L$xH‹„$€H‰Œ$H‰„$HĨÉéÙûÿÿH +00runtime.morestack_noctxt¤$type."".NoSuchExec¶"runtime.newobject¬4runtime.writebarrierstringÚ8go.itab.*"".NoSuchExec.error¶&type.*"".NoSuchExecÌtype.errorä8go.itab.*"".NoSuchExec.errorø runtime.typ2Itabútype.string¦runtime.convT2E2runtime.writebarrierifacež4go.string."/exec/%s/start"’fmt.Sprintfž „ runtime.duffcopyê 0type."".StartExecOptions– +runtime.convT2Eê + go.string."POST"ì "".(*Client).doÒ $type."".NoSuchExecä "runtime.newobjectÚ 4runtime.writebarrierstringˆ8go.itab.*"".NoSuchExec.errorä&type.*"".NoSuchExecútype.error’8go.itab.*"".NoSuchExec.error¦ runtime.typ2Itabþ„ runtime.duffcopyŒ""".statictmp_1200²è runtime.duffcopy–0type."".StartExecOptionsÂruntime.convT2E– go.string."POST"Œè runtime.duffcopy–&"".(*Client).hijackàÐ&"".autotmp_1199Ÿ*type."".hijackOptions"".autotmp_1198type.*uint8"".autotmp_1197&type.*"".NoSuchExec"".autotmp_1196"type."".doOptions"".autotmp_1195ï"type.interface {}"".autotmp_1193ß&type.[]interface 
{}"".autotmp_1191¿&type.*"".NoSuchExec"".autotmp_11890type."".StartExecOptions"".autotmp_1188&type.*"".NoSuchExec"".autotmp_1187¯0type."".StartExecOptions"".autotmp_1185Ïtype.string"".autotmp_1184¯(type.[1]interface {}"".autotmp_1183&type.*"".NoSuchExec "".errtype.error"".path¯type.string "".~r2Àtype.error"".opts00type."".StartExecOptions +"".idtype.string"".ctype.*"".ClientN%вÏÐÖÏÐXÏÐÏÐËÏÐ € RèE +Ä”á Ä  …  x' 4Z;ýv‚k< ;ô +j6Tgclocals·1bba016e9a05211bda029a0d75dbaa68Tgclocals·49a517ef1dc6f4c5ce9b594512df2da4è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.goþ4"".(*Client).ResizeExecTTYàÂeH‹ %H„$ÿÿÿH;AwèëâHìpHÇ„$ HÇ„$¨HH‰$HÇD$èH‹D$H‰„$€H‰D$pH‹œ$H‰$èH‹L$H‹D$HH‹3H‹kH‰Œ$ˆH‰„$H‰´$¸H‰´$øH‰¬$ÀH‰¬$HH‰$èH‹\$Hƒû„ºHÇÂHÇÁH‰œ$8H‰”$@H‰Œ$HH‹œ$8H‰$H‹œ$ˆH‰\$H‹œ$H‰\$èHH‰$H‹\$pH‰\$Hœ$øH‰\$Hœ$8H‰\$èH‹œ$€H‰\$xH‹œ$˜H‰$èH‹L$H‹D$HH‹3H‹kH‰Œ$˜H‰„$ H‰´$ÈH‰´$øH‰¬$ÐH‰¬$HH‰$èH‹\$Hƒû„©HÇÂHÇÁH‰œ$8H‰”$@H‰Œ$HH‹œ$8H‰$H‹œ$˜H‰\$H‹œ$ H‰\$èHH‰$H‹\$xH‰\$Hœ$øH‰\$Hœ$8H‰\$èH‹œ$€H‰œ$øH‹œ$ˆH‰œ$H‹œ$€H‰$èH‹\$H‰œ$èH‹\$H‰œ$ðH¼$P1ÀèHœ$PHƒû„±HÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$Hœ$øH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ØH‰L$H‰„$àH‰D$èHH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$ØH‰L$H‰„$àH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0Hœ$HÇHÇCHÇCH‹´$xH‰4$H5Hl$H‰ïH¥H¥H‰Œ$¨H‰L$H‰„$°H‰D$ Hœ$Hl$(H‰ïH‰ÞH¥H¥H¥èH‹L$`H‹D$hH‰Œ$ H‰„$¨HÄpÉéHþÿÿ‰éPýÿÿ‰é?üÿÿ< +00runtime.morestack_noctxt€&type.net/url.Values¤runtime.makemapêstrconv.ItoaŒgo.string."h"ˆtype.[1]stringš"runtime.newobjectÚ4runtime.writebarrierstringè&type.net/url.ValuesÂ$runtime.mapassign1þstrconv.Itoa go.string."w"œtype.[1]string®"runtime.newobjectî4runtime.writebarrierstringü&type.net/url.ValuesÖ $runtime.mapassign1¸ +*net/url.Values.EncodeŠ ð runtime.duffzeroˆ type.string´ runtime.convT2Ež 2runtime.writebarrieriface¬ type.stringØ 
runtime.convT2EÊ2runtime.writebarrierifaceØQ#X`4X`41~Ì€KTgclocals·a32b6721babe8d92a9fbbdd02846be55Tgclocals·2087372b16bcbe3ecc60cbe0fcb1f127è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.goþ0"".(*Client).InspectExec œeH‹ %H„$hÿÿÿH;AwèëâHìHÇ„$@HÇ„$HH‹œ$(H‰œ$°H‹œ$0H‰œ$¸Hœ$ÀHÇHÇCHœ$ÀHƒû„AHÇÂHÇÁH‰œ$H‰”$H‰Œ$HH‰$Hœ$°H‰\$èH‹L$H‹D$H‹œ$H‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹L$(H‹D$0Hœ$èHÇHÇCHÇCH‹´$ H‰4$H5Hl$H‰ïH¥H¥H‰Œ$€H‰L$H‰„$ˆH‰D$ Hœ$èHl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$ÐH‹\$HH‰œ$ØH‹\$PH‰œ$àH‹L$XH‹D$`H‰„$H‹T$hH‰”$˜Hù”…ÄHH‰$èH‹D$H‰D$pH‰$Hƒ<$„H‹œ$(H‰\$H‹œ$0H‰\$èH‹\$pH‰\$pH‹1íH9èt)HÇ„$8H‹\$pH‰œ$HH‰„$@HÄÃHH‰$HH‰\$HH‰\$èH‹D$먉%éeÿÿÿHƒøt$HÇ„$8H‰„$@H‰”$HHÄÃHH‰$èH‹D$H‰D$xH‹œ$ÐH‰$H‹œ$ØH‰\$H‹œ$àH‰\$H H‰Œ$ H‰L$H‰„$¨H‰D$ èH‹D$(H‹L$0Hƒøt$HÇ„$8H‰„$@H‰Œ$HHÄÃH‹\$xH‰œ$8HÇ„$@HÇ„$HHÄÉé¸üÿÿ* +00runtime.morestack_noctxtÞtype.stringŠruntime.convT2Eô2runtime.writebarrieriface‚2go.string."/exec/%s/json"öfmt.Sprintfîgo.string."GET"ö"".(*Client).doª$type."".NoSuchExec¼"runtime.newobject¬ 4runtime.writebarrierstringÎ 8go.itab.*"".NoSuchExec.error¼ +&type.*"".NoSuchExecÒ +type.errorê +8go.itab.*"".NoSuchExec.errorþ + runtime.typ2Itab† &type."".ExecInspect˜ "runtime.newobject† (type.*"".ExecInspectÄ .encoding/json.Unmarshal`°"".autotmp_1243Ï&type.*"".NoSuchExec"".autotmp_1242_"type."".doOptions"".autotmp_1241ï"type.interface {}"".autotmp_1239/&type.[]interface {}"".autotmp_1236&type.*"".NoSuchExec"".autotmp_1234Ïtype.string"".autotmp_1233¯(type.[1]interface {}"".&exec¿(type.*"".ExecInspect "".errtype.error"".bodytype.[]uint8"".path¯type.string "".~r2@type.error "".~r10(type.*"".ExecInspect +"".idtype.string"".ctype.*"".Client@%°õ¯°d¯°™¯°,¯° Ð>Ä=Œ¼ Ä$[$- &Äv€›¶VoTgclocals·f09ff24693e6d72e9e2f82319a6e45a0Tgclocals·384a8bdfde3344f7f6d876df7f86cb62è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.goþ,"".(*NoSuchExec).Error€úeH‹ 
%H;awèëêHƒì0HÇD$@HÇD$HHH,$H‰ïH‰ÞH¥H¥H‹|$8Hƒÿt-H/H|$H‰îH¥H¥èH‹\$ H‰\$@H‹\$(H‰\$HHƒÄ0ÉëÏ + 0runtime.morestack_noctxt^Fgo.string."No such exec instance: "¸*runtime.concatstring20` "".~r0type.string "".err&type.*"".NoSuchExec`^_`€ ð,T +[%Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.goþ."".(*Client).ListImages€ ò eH‹ %HD$€H;AwèëåHìHÇ„$(HÇ„$0HÇ„$8HÇ„$@HÇ„$H¶œ$ˆœ$èH‹œ$H‰œ$ð¶œ$ ˆœ$øHH‰$Hœ$èH‰\$èH\$H,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥H‰Œ$¨H‰L$H‰„$°H‰D$èH‹L$ H‹D$(Hœ$ÐHÇHÇCHÇCH‹´$H‰4$H5Hl$H‰ïH¥H¥H‰L$xH‰L$H‰„$€H‰D$ Hœ$ÐHl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$¸H‹\$HH‰œ$ÀH‹\$PH‰œ$ÈH‹D$`H‹L$hH‰Œ$HƒøH‰„$ˆty  i‡Tgclocals·40aca4155fa4487b7677bf99ab045398Tgclocals·af8ab8e4cb4a9346224baf13f9cce340ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ,"".(*Client).LoadImageàÆeH‹ %HD$¸H;AwèëåHìÈHÇ„$èHÇ„$ðH¼$€1ÀèHÇÆ@ˆ´$€H´$ØH¬$H‰ïH¥H¥H‹´$ÐH‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥Hœ$€Hl$(H‰ïH‰ÞèèH‹L$pH‹D$xH‰Œ$èH‰„$ðHÄÈà +*0runtime.morestack_noctxtŠÜ runtime.duffzeroü go.string."POST"¢0go.string."/images/load"ð„ runtime.duffcopyú&"".(*Client).streamP"".autotmp_1320*type."".streamOptions "".~r10type.error"".opts0type."".LoadImageOptions"".ctype.*"".Client"Àðè:5 ¼4Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·aa2b73cba71b69bc124f64f356bee8e7ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ0"".(*Client).ExportImageÀºeH‹ %H„$pÿÿÿH;AwèëâHìHÇ„$@HÇ„$HHœ$ HÇHÇCHœ$ Hƒû„pHÇÅHÇÂH‰œ$°H‰¬$¸H‰”$ÀHH‰$Hœ$ H‰\$èH‹T$H‹D$H‹œ$°H‰$H‰”$€H‰T$H‰„$ˆH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$ èL‹D$(H‹T$0H¼$È1ÀèHÇÆ@ˆ´$ÈH´$0H¬$èH‰ïH¥H¥H‹´$H‰4$H5Hl$H‰ïH¥H¥L‰„$L‰D$H‰”$˜H‰T$ Hœ$ÈHl$(H‰ïH‰ÞèèH‹L$pH‹D$xH‰Œ$@H‰„$HHÄÉé‰þÿÿ +00runtime.morestack_noctxtžtype.stringÊruntime.convT2E´2runtime.writebarrierifaceÂ4go.string."/images/%s/get"¶fmt.SprintfèÜ 
runtime.duffzeroÚgo.string."GET"Ö„ runtime.duffcopyà&"".(*Client).streamp "".autotmp_1327*type."".streamOptions"".autotmp_1326Ÿ"type.interface {}"".autotmp_1324¿&type.[]interface {}"".autotmp_1322ÿtype.string"".autotmp_1321ß(type.[1]interface {} "".~r1Ptype.error"".opts4type."".ExportImageOptions"".ctype.*"".Client% °Ÿ  +àŒ=¡x +¤vÆTgclocals·740354061e4e9c9d9a50f05557f21f54Tgclocals·fabba7188ed1b3b8bac23e1a07c66457ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ2"".(*Client).ExportImages€ðeH‹ %HD$H;AwèëåHìðHH‰$èH‹D$H¬$H‰ÇH‰îèHÇ„$(HÇ„$0Hƒ8„&H‹hHƒý„H‰„$€H‰ÂHH‰„$ˆH‰$H‰”$H‰T$èL‹D$H‹T$H¼$¨1ÀèHÇÈœ$¨H‹¼$€HoH¼$ÈH‰îH¥H¥HH,$H‰ïH‰ÞH¥H¥L‰„$˜L‰D$H‰”$ H‰T$èH\$ Hl$H‰ïH‰ÞH¥H¥H‹´$øH‰4$H5Hl$H‰ïH¥H¥Hœ$¨Hl$(H‰ïH‰ÞèèH‹L$pH‹D$xH‰Œ$(H‰„$0HÄðÃH‹H‰œ$(H‹H‰œ$0HÄðà +*0runtime.morestack_noctxtJ6type."".ExportImagesOptions\"runtime.newobjectŒô runtime.duffcopy8type.*"".ExportImagesOptionsÌ"".queryStringþÜ runtime.duffzeroÞ0go.string."/images/get?"¸*runtime.concatstring2†go.string."GET"΄ runtime.duffcopyØ&"".(*Client).streamª,"".ErrMustSpecifyNamesÈ,"".ErrMustSpecifyNames€à +"".autotmp_1335*type."".streamOptions"".autotmp_1333¯type.string"".&optsß8type.*"".ExportImagesOptions "".~r1`type.error"".ctype.*"".Client "àïßà%ß À°b¡w .-xvPUTgclocals·0629ba7d00f7a57ad6e2352df47e7bb3Tgclocals·d83f68254ae1224f9001a252749abef2ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ0"".(*Client).ImportImageàÌeH‹ %HD$ H;AwèëåHìàHH‰$èH‹D$H¬$ðH‰ÇH‰îèHÇ„$HHÇ„$PH‹XHƒûu&H‹H‰œ$HH‹H‰œ$PHÄàÃH‰D$hH‹PH‹HHƒù…¤H‰”$ H‰$H‰Œ$¨H‰L$H-LD$L‰ÇH‰îH¥H¥èH‹D$h¶\$ €û„]H‹PH‰”$ H‹HH‰Œ$¨Hƒù…ðH‰$H‰L$H-LD$L‰ÇH‰îH¥H¥èH‹D$h¶\$ €û„¹H‰ÁHH‰„$€H‰$H‰Œ$ˆH‰L$èH‹D$hH‹T$H‹L$H‹¼$èH‰<$H‰”$ H‰T$H‰Œ$¨H‰L$HÇD$Hh0H|$ H‰îH¥H¥Hh@H\$0H‰ßH‰îH¥H¥¶hP@ˆl$@èH‹L$HH‹D$PH‰Œ$HH‰„$PHÄàÃHhH$H‰ßH‰îH¥H¥èH‹D$h¶\$€û…ÿÿÿHhH$H‰ßH‰îH¥H¥èH‹L$H‹D$H‹T$ H‰T$xHƒøH‰D$ptH‰„$HH‰”$PHÄàÃH‰L$`H‹1íH9è„ŠH‹L$`H‰„$H‰$H‰Œ$˜H‰L$èH‹T$H‹L$H‹D$ 
H‹\$(H‰\$pH‹\$0H‰\$xH‰”$ÈH‰”$°H‰Œ$ÐH‰Œ$¸H‰„$ØH‰„$ÀHH‰$èH‹L$H‰ÏHƒù„ë1ÀèH‰L$XH‰ $Hƒ<$„ÄH‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹\$XH‰\$XH‹1íH9ètQH‹\$hH‰$Hƒ$0H‹L$XH‰„$H‰D$H‰Œ$˜H‰L$èH‹D$hHhHH‰ïH‰ÞH¥H¥épýÿÿHH‰$HH‰\$HH‰\$èH‹D$뀉%é0ÿÿÿ‰éÿÿÿHH‰$HH‰\$HH‰\$èH‹D$éDþÿÿHh0HÇEHÇEéŠüÿÿB +*0runtime.morestack_noctxtJ4type."".ImportImageOptions\"runtime.newobjectŒÌ runtime.duffcopyÞ""".ErrNoSuchImageü""".ErrNoSuchImageŠgo.string."-"² runtime.eqstring¼go.string."-"ä runtime.eqstringž6type.*"".ImportImageOptionsÚ"".queryString¸0"".(*Client).createImageª"".isURLþos.Open„ +4go.itab.*os.File.io.Readerà +"io/ioutil.ReadAll” "type.bytes.Buffer¦ "runtime.newobjectØ È runtime.duffzeroØ 2runtime.writebarriersliceú >go.itab.*bytes.Buffer.io.Readerì2runtime.writebarrierifaceŒgo.string."-"¸$type.*bytes.BufferÎtype.io.Readeræ>go.itab.*bytes.Buffer.io.Readerú runtime.typ2Itab¼type.*os.FileÒtype.io.Readerê4go.itab.*os.File.io.Readerþ runtime.typ2ItabàÀ"".autotmp_1349type.*uint8"".autotmp_1348$type.*bytes.Buffer"".autotmp_1347$type.*bytes.Buffer"".autotmp_1345type.string"".autotmp_1344type.string"".autotmp_1342type.string"".autotmp_1340$type.*bytes.Buffer"".autotmp_1339ÿtype.*os.File"".&optsï6type.*"".ImportImageOptionsbytes.buf·2_type.[]uint8"".b/type.[]uint8 "".errßtype.error "".~r1Àtype.error"".ctype.*"".Client4"Ào¿Àï¿Àw¿ÀöðPæb +&^Y¹*+ ]èB2 0-«ƒ9þYJGB2Tgclocals·10971996d6a01a6d477c3318892d070fTgclocals·9b3781349ecf8ea1253d7ba626d001b4ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ."".(*Client).BuildImageÀºeH‹ %H„$ðþÿÿH;AwèëâHìHH‰$èH‹T$H¬$ H‰×H‰îèHÇ„$€HÇ„$ˆHƒz`u&H‹H‰œ$€H‹H‰œ$ˆHÄÃH¼$(1ÀèHœ$(Hƒû„£HÇÅHÇÁH‰œ$H‰¬$H‰Œ$ HH‰$H‰”$ˆH‰T$HD$ˆèH‹L$H‹D$H‹œ$H‰$H‰Œ$ÀH‰L$H‰„$ÈH‰D$èHH‰$H‹œ$ˆH‰\$HD$ÈèH‹L$H‹D$H‹œ$HƒÃH‰$H‰Œ$ÀH‰L$H‰„$ÈH‰D$èH‹œ$H‰$H‹œ$H‰\$H‹œ$ H‰\$èH‹„$ˆH‹\$H‰œ$€H‹L$ H‹T$(H‰”$˜HƒùH‰Œ$tH‰Œ$€H‰”$ˆHÄÃH‹˜€Hƒût.H‹XHƒûu$H‰$HhxH\$H‰ßH‰îH¥H¥èH‹„$ˆHƒxP……H‹˜ØHƒû…tH‹˜€Hƒûu&H‹H‰œ$€H‹H‰œ$ˆHÄÃH‹˜ØHƒû„ HƒxPt&H‹H‰œ$€H‹H‰œ$ˆHÄÃHÇ„$ 
HÇ„$¨H¨ÐH$H‰ßH‰îH¥H¥HhH\$H‰ßH‰îH¥H¥èH‹L$ H‹D$(H‹\$0H‰œ$ H‹\$8H‰œ$¨HH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èH\$Hl$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰$Hƒ$PèH‹Œ$ H‹„$ˆHƒùt H‰Œ$€H‹œ$¨H‰œ$ˆHÄÃHH‰”$ÀH‰$H‰„$ÈH‰D$èH‹\$H‰œ$H‹\$H‰œ$Hœ$àHÇHÇCHœ$àHƒû„¡HÇÂHÇÅH‰œ$H‰”$H‰¬$ HH‰$Hœ$H‰\$èH‹T$H‹D$H‹œ$H‰$H‰”$ÀH‰T$H‰„$ÈH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$ H‰\$ èH‹„$ˆL‹D$(H‹T$0HH¬$HH‰ïH‰Þè¶Xpˆœ$IH‹¼$€H‰¼$PHhPH¼$XH‰îH¥H¥Hh`Hœ$hH‰ßH‰îH¥H¥H‹´$˜H‰4$H5Hl$H‰ïH¥H¥L‰„$ÐL‰D$H‰”$ØH‰T$ Hœ$HHl$(H‰ïH‰ÞèèH‹L$pH‹D$xH‰Œ$€H‰„$ˆHÄÉéXþÿÿHH‹+H‰¬$H‹kH‰¬$HH‹+H‰¬$ðH‹kH‰¬$øHH‰$H‹œ$€H‰\$Hœ$H‰\$Hœ$ðH‰\$èH‹„$ˆé?üÿÿ‰éVúÿÿP +00runtime.morestack_noctxtP2type."".BuildImageOptionsb"runtime.newobject’ð runtime.duffcopyÞ2"".ErrMissingOutputStreamü2"".ErrMissingOutputStreamºð runtime.duffzero¸2type."".AuthConfigurationöruntime.convT2Eà2runtime.writebarrierifaceî4type."".AuthConfigurations¬runtime.convT2Ež2runtime.writebarrierifaceô$"".headersWithAuthô4runtime.writebarrierstringä """.ErrMissingRepo‚ +""".ErrMissingRepoà +,"".ErrMultipleContextsþ +,"".ErrMultipleContexts¨ $"".createTarStreamþ type.io.ReaderÄ runtime.convI2I˜2runtime.writebarrieriface’4type.*"".BuildImageOptionsÎ"".queryString®type.stringÚruntime.convT2EÄ2runtime.writebarrierifaceÒ*go.string."/build?%s"Æfmt.Sprintfø""".statictmp_1373ž„ runtime.duffcopyÌ go.string."POST"È„ runtime.duffcopyÒ&"".(*Client).stream²0go.string."Content-Type"î6go.string."application/tar"ª,type.map[string]stringŠ$runtime.mapassign1ð &"".autotmp_1372*type."".streamOptions"".autotmp_1371"type.interface {}"".autotmp_1369&type.[]interface {}"".autotmp_1368¿$type.io.ReadCloser"".autotmp_1367"type.interface {}"".autotmp_1366Ÿ"type.interface {}"".autotmp_1364ÿ&type.[]interface {}"".autotmp_1362ÿtype.string"".autotmp_1361type.string"".autotmp_1359ß(type.[1]interface {}"".autotmp_1358¿type.string"".autotmp_1357Ÿtype.string"".autotmp_1356Ï(type.[2]interface {}"".&opts4type.*"".BuildImageOptions "".errßtype.error "".errÿtype.error"".headersŸ,type.map[string]string "".~r1Ðtype.error"".ctype.*"".Client^% lŸ øŸ ‰Ÿ =Ÿ òŸ ˆŸ ‘à 
fÈe&Ó$ &&» ‘ x { +>0Ê¿€¨…†vÆœTgclocals·689d5e2b826a4f8fae61c828d739d7d9Tgclocals·f991e5818d95c260e9075daec3edcda1ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ*"".(*Client).TagImage    eH‹ %HD$ H;AwèëåHìàHH‰$èH‹D$H¬$H‰ÇH‰îèHÇ„$(HÇ„$0H‹œ$øHƒûu&H‹H‰œ$(H‹H‰œ$0HÄàÃH‰ÁHH‰D$pH‰$H‰L$xH‰L$èH‹\$H‰œ$H‹\$H‰œ$˜Hœ$ HÇHÇCHœ$ Hƒû„ÎHÇÂHÇÁH‰œ$ÈH‰”$ÐH‰Œ$ØHH‰$Hœ$H‰\$èH‹D$H‹L$H‹œ$ÈH‰$H‰D$pH‰D$H‰L$xH‰L$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ðH‰\$H‹´$øH‰t$H5Hl$ H‰ïH¥H¥èH\$0H,$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$H‹œ$ÐH‰\$H‹œ$ØH‰\$ èH‹L$(H‹D$0Hœ$°HÇHÇCHÇCH‹´$èH‰4$H5Hl$H‰ïH¥H¥H‰Œ$€H‰L$H‰„$ˆH‰D$ Hœ$°Hl$(H‰ïH‰ÞH¥H¥H¥èH‹T$XH‹L$`H‹D$hHú”u&H‹H‰œ$(H‹H‰œ$0HÄàÃH‰Œ$(H‰„$0HÄàÉé+þÿÿ( +*0runtime.morestack_noctxtJ.type."".TagImageOptions\"runtime.newobjectŒô runtime.duffcopyæ""".ErrNoSuchImage„""".ErrNoSuchImage¸0type.*"".TagImageOptionsè"".queryStringÈtype.stringôruntime.convT2EÒ2runtime.writebarrierifaceà(go.string."/images/"¾&go.string."/tag?%s"à*runtime.concatstring3Þfmt.SprintfÖ go.string."POST"Þ "".(*Client).doœ +""".ErrNoSuchImageº +""".ErrNoSuchImage À"".autotmp_1389_"type."".doOptions"".autotmp_1388"type.interface {}"".autotmp_1386/&type.[]interface {}"".autotmp_1385¿type.string"".autotmp_1384Ÿtype.string"".autotmp_1382(type.[1]interface {} "".~r2€type.error"".nametype.string"".ctype.*"".Client2"Às¿Àš¿À¿ÀÐ4¨b&×BÄ & -ŒµâTgclocals·23803564b4b262dab15001f621fd3b37Tgclocals·c6e5a101f01f70a879acdb3760944b0dê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ"".isURLàÜeH‹ %H;awèëêHƒìPH‹\$XH‰$H‹\$`H‰\$èH‹T$H‹D$H‹\$ H‰\$8HƒøH‰D$0t +ÆD$hHƒÄPÃH‰T$(Hƒú„½H‹ +H‹BHƒø…¨H‰L$@H‰ $H‰D$HH‰D$H-LD$L‰ÇH‰îH¥H¥èH‹T$(¶\$ €ûtkHÇÀ<uKHƒútVH‹ +H‹BHƒøuBH‰L$@H‰ $H‰D$HH‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €ût +ÆD$hHƒÄPÃÆD$hëô‰ë¦1À똉é<ÿÿÿ + 0runtime.morestack_noctxt\net/url.Parse¢ go.string."http"Ê runtime.eqstringØ"go.string."https"€ runtime.eqstring0  "".autotmp_1396type.string"".autotmp_1395type.string 
"".err?type.error"".pO"type.*net/url.URL "".~r1 type.bool"".utype.string& @Ÿ ¼Ÿ °Ä,  +Õ-w[1Tgclocals·d7e8a62d22b1cde6d92b17a55c33fe8fTgclocals·2b892b6166a29da84b4f26d3316f1499ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ$"".headersWithAuthÀ¢eH‹ %H„$ÿÿÿH;AwèëâHìhHÇ„$HÇ„$˜HH‰$HÇD$èH‹\$H‰\$HH‹Œ$pH‹„$xH‹œ$€H‰œ$`1ÒH‰„$XH‰D$8H‰Œ$PH‰ÈH‹l$8H9êÃH‰D$XHƒø„ÓH‹H‹hH‰T$@H‰Œ$øH‰¬$H‰Œ$ÈH‰ $H‰¬$ÐH‰l$è‹L$‰L$4ùëhQ…ßHH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$è‹L$4¶\$€û„£HH‰$èH‹D$H‰D$hH‰D$`H‹1íH9è„@H‹L$`H‰„$¸H‰D$xH‰Œ$ÀH‰Œ$€HH‰$èH‹L$H‰ÏHƒù„õ1ÀèH‰L$PH‰ $Hƒ<$„ÎH‹\$xH‰\$H‹œ$€H‰\$èH‹\$PH‰$H‹œ$øH‰\$H‹œ$H‰\$èH‹L$H‹D$ H‰„$ HƒùH‰Œ$˜t$HÇ„$ˆH‰Œ$H‰„$˜HÄhÃHH‹+H‰¬$èH‹kH‰¬$ðH‹L$hH‹yH‹QH‹AH9‚H‹ H‰ÖH)ÆH‰úH)ÂHƒút H‰ÃHËH‰ÙH‰Œ$8H‰´$@H‰”$HH‹H‰$H‰Œ$ H‰L$H‰´$(H‰t$H‰”$0H‰T$èH‹\$ H‰œ$ØH‹\$(H‰œ$àHH‰$H‹\$HH‰\$Hœ$èH‰\$Hœ$ØH‰\$èH‹D$XH‹T$@HƒÀHÿÂH‹l$8H9êŒ=ýÿÿH‹\$HH‰œ$ˆHÇ„$HÇ„$˜HÄhÃè ‰%é&þÿÿ‰éþÿÿHH‰$HH‰\$HH‰\$èH‹D$éŽýÿÿùÎWÆ[…\ÿÿÿHH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$è¶\$€û„$ÿÿÿHH‰$èH‹D$H‰D$pH‰D$`H‹1íH9è„ÿH‹L$`H‰„$¸H‰„$ˆH‰Œ$ÀH‰Œ$HH‰$èH‹L$H‰ÏHƒù„±1ÀèH‰L$PH‰ $Hƒ<$„ŠH‹œ$ˆH‰\$H‹œ$H‰\$èH‹\$PH‰$H‹œ$øH‰\$H‹œ$H‰\$èH‹L$H‹D$ H‰„$°HƒùH‰Œ$¨t$HÇ„$ˆH‰Œ$H‰„$˜HÄhÃHH‹+H‰¬$èH‹kH‰¬$ðH‹D$pH‹xH‹PH‹HH9Ê‚ÀH‹H‰ÖH)ÎH‰úH)ÊHƒút H‰ËHÃH‰ØH‰„$8H‰´$@H‰”$HH‹H‰$H‰„$H‰D$H‰´$H‰t$H‰”$H‰T$èH‹\$ H‰œ$ØH‹\$(H‰œ$àHH‰$H‹\$HH‰\$Hœ$èH‰\$Hœ$ØH‰\$èéýÿÿè ‰%éjþÿÿ‰éHþÿÿHH‰$HH‰\$HH‰\$èH‹D$éÏýÿÿ‰é&úÿÿZ +00runtime.morestack_noctxt€,type.map[string]string¤runtime.makemapÚ$runtime.efacethash2type."".AuthConfigurationÖ&runtime.assertE2TOKˆ"type.bytes.Bufferš"runtime.newobjectÆ>go.itab.*bytes.Buffer.io.Writer®4type.encoding/json.EncoderÀ"runtime.newobjectòð runtime.duffzeroÒ2runtime.writebarrieriface¢>encoding/json.(*Encoder).Encode¸ 6go.string."X-Registry-Auth"” 6encoding/base64.URLEncodingô Tencoding/base64.(*Encoding).EncodeToString¶ ,type.map[string]string $runtime.mapassign1²$runtime.panicsliceê$type.*bytes.Buffer€type.io.Writer˜>go.itab.*bytes.Buffer.io.Writer¬ 
runtime.typ2Itabæ4type."".AuthConfigurations¬&runtime.assertE2TOKÖ"type.bytes.Bufferè"runtime.newobject”>go.itab.*bytes.Buffer.io.Writer‚4type.encoding/json.Encoder”"runtime.newobjectÆð runtime.duffzero¬2runtime.writebarrierifaceü>encoding/json.(*Encoder).Encode’:go.string."X-Registry-Config"î6encoding/base64.URLEncodingÎTencoding/base64.(*Encoding).EncodeToString,type.map[string]stringê$runtime.mapassign1þ$runtime.panicslice¶$type.*bytes.BufferÌtype.io.Writerä>go.itab.*bytes.Buffer.io.Writerø runtime.typ2Itab`ÐN"".autotmp_1430type.uint64"".autotmp_1429type.uint64"".autotmp_1428type.uint64"".autotmp_1427type.[]uint8"".autotmp_14266type.*encoding/json.Encoder"".autotmp_14256type.*encoding/json.Encoder"".autotmp_1424type.*uint8"".autotmp_1423type.io.Writer"".autotmp_1419_type.[]uint8"".autotmp_1418¯6type.*encoding/json.Encoder"".autotmp_14176type.*encoding/json.Encoder"".autotmp_1415ßtype.io.Writer"".autotmp_1414çtype.uint32"".autotmp_1412"type.interface {}"".autotmp_1411¿"type.interface {}"".autotmp_1410Ÿ$type.*interface {}"".autotmp_1409ßtype.int"".autotmp_1408Ïtype.int"".autotmp_1407type.string"".autotmp_1406type.string"".autotmp_1405type.error"".autotmp_1404$type.*bytes.Buffer"".autotmp_1403Ÿtype.string"".autotmp_1402ÿtype.string"".autotmp_1400$type.*bytes.Buffer"".autotmp_1399/&type.[]interface {}"".&bufï$type.*bytes.Buffer"".&bufÿ$type.*bytes.Buffer "".~r0¿type.[]uint8$encoding/json.w·2¿type.io.Writer "".~r0type.[]uint8$encoding/json.w·2ßtype.io.Writer "".errÿtype.error "".errŸtype.error"".authß"type.interface {}"".headers¿,type.map[string]string "".~r2@type.error "".~r10,type.map[string]string"".auths&type.[]interface {}8%гÏоÏЭÏÐÚ  `Ô=#c~ß$ó "-EDå$óETQ›`SI(éNQ=@tL(é N +=%Tgclocals·afcded8c13354e18af605d7f21ec25feTgclocals·7a03355e34b75c37acf5eff7bf674ad4ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goþ2"".(*Client).SearchImages€ +ø eH‹ 
%HD$¸H;AwèëåHìÈHÇ„$èHÇ„$ðHÇ„$øHÇ„$HÇ„$Hœ$°HÇHÇCHÇCHH,$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$H‹œ$àH‰\$èH\$ Hl$H‰ïH‰ÞH¥H¥H‹´$ÐH‰4$H5Hl$H‰ïH¥H¥Hœ$°Hl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$˜H‹\$HH‰œ$ H‹\$PH‰œ$¨H‹D$`H‹L$hH‰Œ$€HƒøH‰D$xtgo.itab.*bytes.Reader.io.Reader– "".(*Env).Decodeæ $type.*bytes.Readerü type.io.Reader” +>go.itab.*bytes.Reader.io.Reader¨ + runtime.typ2Itab@Ð"".autotmp_1448ï$type.*bytes.Reader"".autotmp_1447$type.*bytes.Reader"".autotmp_1446/"type."".doOptions"".autotmp_1444$type.*bytes.Reader"".&envßtype.*"".Envbytes.b·2type.[]uint8 "".errÏtype.error"".body_type.[]uint8 "".~r1 type.error "".~r0type.*"".Env"".ctype.*"".Client4"ÐôÏЫÏÐ,ÏÐ@°* :«$î$-@"§{JEY‰Tgclocals·9cf15d8275d9c299f023024ca604cf90Tgclocals·41bb44495be0a59dc118277b1d9139f9è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.goþ""".(*Client).Infoà +Ü +eH‹ %HD$˜H;AwèëåHìèHÇ„$HÇ„$Hœ$ÐHÇHÇCHÇCH‹´$ðH‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥Hœ$ÐHl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$¸H‹\$HH‰œ$ÀH‹\$PH‰œ$ÈH‹D$`H‹L$hH‰Œ$ˆHƒøH‰„$€t$HÇ„$øH‰„$H‰Œ$HÄèÃHH‰$èH‹\$H‰\$xH‹œ$¸H‰œ$ H‹œ$ÀH‰œ$¨H‹œ$ÈH‰œ$°HH‰$èH‹D$H‰D$pH‰$Hƒ<$„H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$èH‹D$pHÇ@HÇ@ ÿÿÿÿH‰D$pH‹1íH9è„ŽH‹\$xH‰$H‹L$pH‰„$H‰D$H‰Œ$˜H‰L$èH‹D$H‹L$ Hƒøt$HÇ„$øH‰„$H‰Œ$HÄèÃH‹\$xH‰œ$øHÇ„$HÇ„$HÄèÃHH‰$HH‰\$HH‰\$èH‹D$é@ÿÿÿ‰%éÜþÿÿ +*0runtime.morestack_noctxtÐgo.string."GET"ö"go.string."/info"Ð"".(*Client).do´type."".EnvÆ"runtime.newobjectÈ"type.bytes.ReaderÚ"runtime.newobjectä2runtime.writebarrierslice¦>go.itab.*bytes.Reader.io.Reader– "".(*Env).Decodeæ $type.*bytes.Readerü type.io.Reader” +>go.itab.*bytes.Reader.io.Reader¨ + runtime.typ2Itab@Ð"".autotmp_1456ï$type.*bytes.Reader"".autotmp_1455$type.*bytes.Reader"".autotmp_1454/"type."".doOptions"".autotmp_1452$type.*bytes.Reader"".&infoßtype.*"".Envbytes.b·2type.[]uint8 "".errÏtype.error"".body_type.[]uint8 "".~r1 type.error 
"".~r0type.*"".Env"".ctype.*"".Client4"ÐôÏЫÏÐ,ÏÐ@°.>:«$è$-@"§{JEY‰Tgclocals·9cf15d8275d9c299f023024ca604cf90Tgclocals·41bb44495be0a59dc118277b1d9139f9è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.goþ*"".ParseRepositoryTagàÌeH‹ %H;awèëêHƒì@HÇD$hHÇD$pHÇD$XHÇD$`H‹\$HH‰$H‹t$PH‰t$H5Hl$H‰ïH¥H¥èH‹t$HH‹L$PH‹D$ Hƒø}!H‰t$XH‰L$`HÇD$hHÇD$pHƒÄ@ÃH‰ÃH‰D$(HÿÃH‰ÊH9Ù‚©H‰ðH)ÚHƒútHÃH‰ØH‰D$0H‰$H‰T$8H‰T$HHl$H‰ïH‰ÞH¥H¥èH‹T$HH‹L$P¶\$ €ûu4H‹D$(H9Ár#H‰T$XH‰D$`H‹\$0H‰\$hH‹\$8H‰\$pHƒÄ@Ãè H‰T$XH‰L$`HÇD$hHÇD$pHƒÄ@Ãè  + 0runtime.morestack_noctxt¨go.string.":"Ê"strings.LastIndex¼go.string."/"ä strings.Containsð$runtime.panicsliceÀ$runtime.panicslice`€"".autotmp_1467type.uint64"".autotmp_1466type.uint64"".autotmp_1463type.uint64"".autotmp_1462type.int "".tagtype.string"".n/type.int "".tag@type.string"".repository type.string"".repoTagtype.string.€„€—€'€ð"f>:!k4!dF9Tgclocals·ca1ebfc68aaed1d083688775167e5178Tgclocals·8d600a433c6aaa81a4fe446d95c5546bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.goþ2"".(*Client).ListNetworks  ” eH‹ %HD$¸H;AwèëåHìÈHÇ„$ØHÇ„$àHÇ„$èHÇ„$ðHÇ„$øHœ$°HÇHÇCHÇCH‹´$ÐH‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥Hœ$°Hl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$˜H‹\$HH‰œ$ H‹\$PH‰œ$¨H‹D$`H‹L$hH‰Œ$€HƒøH‰D$xtgo.itab.*"".NoSuchNetwork.errorš,type.*"".NoSuchNetwork°type.errorÈ>go.itab.*"".NoSuchNetwork.errorÜ runtime.typ2Itabätype."".Networkö"runtime.newobjectä  type.*"".Network¢ +.encoding/json.Unmarshal`À"".autotmp_1482ß,type.*"".NoSuchNetwork"".autotmp_1481/"type."".doOptions"".autotmp_1478,type.*"".NoSuchNetwork"".&networkÏ type.*"".Network "".errŸtype.error"".body_type.[]uint8"".path¿type.string "".~r2@type.error "".~r10 type.*"".Network +"".idtype.string"".ctype.*"".Client:"À§¿Àd¿À™¿À,¿ €2p:>¼ 
Ä$a$7i›¶VpTgclocals·be34fa03b4e4d696adaf8f647f7704fdTgclocals·0c8fa0fcc4836d09a64d3d20b95663feî/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.goþ4"".(*Client).CreateNetwork  Ž eH‹ %HD$ˆH;AwèëåHìøHÇ„$8HÇ„$@Hœ$H¬$ÐH‰ïH‰ÞèHœ$¸HÇHÇCHÇCHH‰$Hœ$ÐH‰\$èH\$H¬$¸H‰ïH‰ÞH¥H¥H‹´$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥Hœ$¸Hl$(H‰ïH‰ÞH¥H¥H¥èH‹\$@H‰œ$ H‹\$HH‰œ$¨H‹\$PH‰œ$°H‹L$XH‹D$`H‹T$hH‰”$ˆHù™u2HÇ„$0H‹H‰œ$8H‹H‰œ$@HÄøÃHƒøH‰„$€t$HÇ„$0H‰„$8H‰”$@HÄøÃHH‰$èH‹\$H‰\$xHH‰$èH‹L$H‰L$pH‹œ$ H‰$H‹œ$¨H‰\$H‹œ$°H‰\$HH‰„$H‰D$H‰Œ$˜H‰L$ èH‹L$(H‹D$0H‰„$ˆHƒùH‰Œ$€t$HÇ„$0H‰Œ$8H‰„$@HÄøÃH‹t$xH‰4$H´$Hl$H‰ïH¥H¥èH‹\$xH‰$Hƒ$H‹|$pH/H|$H‰îH¥H¥èH‹t$xH‰4$Hƒ$ H´$Hl$H‰ïH¥H¥èH‹\$xH‰œ$0HÇ„$8HÇ„$@HÄøÃ& +*0runtime.morestack_noctxt¢ô runtime.duffcopyî8type."".CreateNetworkOptionsšruntime.convT2Eî go.string."POST"”*go.string."/networks"î"".(*Client).do¢4"".ErrNetworkAlreadyExistsÀ4"".ErrNetworkAlreadyExistsÒtype."".Networkä"runtime.newobject†@type."".createNetworkResponse·1˜"runtime.newobject†Btype.*"".createNetworkResponse·1Ä.encoding/json.Unmarshal +4runtime.writebarrierstringÞ +4runtime.writebarrierstring¬ 4runtime.writebarrierstringð"".autotmp_1490"type."".doOptions"".autotmp_1487O8type."".CreateNetworkOptions"".&networkÿ type.*"".Network"".&respBtype.*"".createNetworkResponse·1 "".errïtype.error"".body¯type.[]uint8 "".~r2ptype.error "".~r1` type.*"".Network"".opts8type."".CreateNetworkOptions"".ctype.*"".Client<"ðÑïð1ïðÃïðœï +T¬: +S > 2$c$"''6$Œj»VbTgclocals·81381a8f40f0e35a38db28a8bb50de11Tgclocals·35aa8cef5f531c9de8f76600ceb85b27î/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.goþ2"".(*NoSuchNetwork).ErroràÜeH‹ %H;awèëêHƒìpHÇ„$€HÇ„$ˆH\$HHÇHÇCH\$HHƒû„ÒHÇÂHÇÁH‰\$XH‰T$`H‰L$hHH‰$H‹\$xH‰\$Hƒ|$„ˆèH‹L$H‹D$H‹\$XH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$ èH‹L$(H‹D$0H‰Œ$€H‰„$ˆHƒÄpÉ%élÿÿÿ‰é'ÿÿÿ + 
0runtime.morestack_noctxtêtype.string¨runtime.convT2E€2runtime.writebarrierifaceŽ>go.string."No such network: %s"ðfmt.Sprintf0à +"".autotmp_1497o"type.interface {}"".autotmp_1495/&type.[]interface {}"".autotmp_1493O(type.[1]interface {} "".~r0type.string "".err,type.*"".NoSuchNetworkà€ßà°ü2þ“d9Tgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·403a8d79fd24b295e8557f6970497aa3î/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.goþ$"".createTarStreamÀ´eH‹ %H„$àþÿÿH;AwèëâHì HÇ„$ÈHÇ„$ÐHÇ„$ØHÇ„$àH‹œ$¨H‰$H‹œ$°H‰\$èH‹\$H‰œ$H‹\$H‰œ$H‹\$ H‰œ$H‹D$(H‹L$0H‰Œ$˜HƒøH‰„$t0HÇ„$ÈHÇ„$ÐH‰„$ØH‰Œ$àHÄ ÃHH‰$èH‹D$Hƒø„ +H-H‰ÇH‰îH¥H¥HÇÆHÇÂH‰„$ÐH‰´$ØH‰”$àHœ$`Hƒû„ºH5H‰ßèHÇÁHÇÂH‰Œ$ðH‰”$øH‰œ$èHƒÃH‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹´$èH‹„$ðH‹œ$øH‰œ$X1ÿH‰„$PH‰D$@H‰´$HH‹l$@H9ï}PH‰t$XHƒþ„H‹H‹nH‰|$HH‰Œ$°H‰L$`H‰¬$¸H‰l$hHƒý…êHƒÆHÿÇH‹l$@H9ï|°H‹œ$¨H‰$H‹œ$°H‰\$H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹D$(H‹L$0H‰Œ$ˆHƒøH‰„$€t0HÇ„$ÈHÇ„$ÐH‰„$ØH‰Œ$àHÄ ÃHH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$PH‰ $Hƒ<$„ôHƒ$H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$èH‹\$PH‰$Hƒ<$„£H‹œ$ÐH‰\$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹D$PHÇ@0HÇÅ@ˆh8H‹œ$¨H‰$H‹œ$°H‰\$H‰D$èH‹L$H‹D$ H‹l$(H‹T$0H‰Œ$ÈH‰„$ÐH‰¬$ØH‰”$àHÄ É%éQÿÿÿ‰%éÿÿÿ‰éÞþÿÿH‰ $H‰l$H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$ èH‹|$HH‹t$X¶\$(H‰ØH‹L$0H‹\$8H‰\$xHƒùH‰L$p„†H‹\$`H‰œ$ÀH‹\$hH‰œ$ÈH¼$€1ÀèHœ$€Hƒû„DHÇÂHÇÁH‰œ$0H‰”$8H‰Œ$@HH‰$Hœ$ÀH‰\$èH‹L$H‹D$H‹œ$0H‰$H‰Œ$ H‰L$H‰„$¨H‰D$èH‹\$pH‰$H‹\$xH‰\$èH‹L$H‹D$H‹œ$0HƒÃH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$ èH‹L$(H‹D$0HÇ„$ÈHÇ„$ÐH‰Œ$ØH‰„$àHÄ Ééµþÿÿ<„#üÿÿH‹”$ÐH‹Œ$ØH‹„$àH‰ÃH)ËHƒû}OHH‰$H‰”$H‰T$H‰Œ$ H‰L$H‰„$(H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰ÓH‰´$ H‰„$(H‰”$H‰ÍHkíHëH‰$H‹\$`H‰\$H‹\$hH‰\$èH‹|$HH‹t$XH‹”$H‹Œ$ H‹„$(H‰”$ÐH‰Œ$ØH‰„$àé)ûÿÿ‰éòúÿÿ‰é?úÿÿ‰éïùÿÿ8 +00runtime.morestack_noctxtÞ("".parseDockerignoreÚtype.[1]stringì"runtime.newobject˜""".statictmp_1509ª""".statictmp_1511º runtime.duffcopyÔ4runtime.writebarrierstring€ +6"".validateContextDirectory® ¾type.github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.TarOptionsÀ 
"runtime.newobjectò Ð runtime.duffzeroü 2runtime.writebarriersliceü 2runtime.writebarriersliceò¼github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.TarWithOptions’²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.MatchesÄð runtime.duffzeroÂtype.stringîruntime.convT2EØ2runtime.writebarrierifaceˆruntime.convI2Eú2runtime.writebarrierifaceˆjgo.string."cannot match .dockerfile: '%s', error: %s"üfmt.Errorfätype.[]stringÖ"runtime.growslice„4runtime.writebarrierstring€À8"".autotmp_1527ŸÀtype.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.TarOptions"".autotmp_1524type.int"".autotmp_1523type.int"".autotmp_1522type.[]string"".autotmp_1521"type.interface {}"".autotmp_1520ÿ"type.interface {}"".autotmp_1518ß&type.[]interface {}"".autotmp_1517ßtype.string"".autotmp_1516type.*string"".autotmp_1515type.int"".autotmp_1514type.int"".autotmp_1513type.[2]string"".autotmp_1508type.error"".autotmp_1506type.error"".autotmp_1503¿type.string"".autotmp_1502?(type.[2]interface {}"".autotmp_1501¯type.[]string "".err¿type.error "".errßtype.error"".includeFileÿtype.string("".forceIncludeFilesïtype.[]string"".includesŸtype.[]string "".errŸtype.error"".excludes¿type.[]string "".~r3`type.error "".~r2@$type.io.ReadCloser""".dockerfilePath type.string"".srcPathtype.stringF%ÀÄ¿Àé¿À¤¿À‚¿À¤ `*UW0Vnqe0 Æ_V†õ  Hn‡´Ö¾@;®Mz­W_Tgclocals·d01647b6fcc19f6b40c264ab6c580992Tgclocals·4df3d887804869ca0d16462c47a4175fæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.goþ6"".validateContextDirectory€èeH‹ %HD$øH;AwèëåHìˆHÇ„$¸HÇ„$ÀH\$hHƒû„äH-H‰ßH‰îèHÇÂHÇÁH‰T$@H‰L$HH‰\$8H‰$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$èH‹T$H‹L$ H\$PH-H‰+H¬$H‰kH¬$ H‰kH‰T$(H‰$H‰L$0H‰L$H‰\$èH‹L$H‹D$ H‰Œ$¸H‰„$ÀHĈÉéÿÿÿ +*0runtime.morestack_noctxt˜""".statictmp_1545® 
runtime.duffcopy®4runtime.writebarrierstringò$path/filepath.Joinž"".func·008Ž$path/filepath.Walkp"".autotmp_1544Ÿtype.[]string"".autotmp_1542oftype.struct { F uintptr; A0 *string; A1 *[]string }"".autotmp_1541¿type.string"".autotmp_1540?type.[2]string "".~r2Ptype.error"".excludes type.[]string"".srcPathtype.string"ŠÀˆ:JfI–"N:Tgclocals·e3c75ef39e8363f5b00a257bd2be7adbTgclocals·7546955fbaa0a8a5c520077bd4d47105æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.goþ("".parseDockerignoreà Ð eH‹ %HD$ˆH;AwèëåHìøHÇ„$HÇ„$HÇ„$ HÇ„$(HÇ„$0HÇ„$HÇ„$˜HÇ„$ Hœ$ØHƒû„ÍH-H‰ßH‰îèHÇÂHÇÁH‰”$ÈH‰Œ$ÐH‰œ$ÀH‰$H‹œ$H‰\$H‹œ$H‰\$èH‹œ$ÀH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$èH‹L$H‹D$ H‰L$hH‰ $H‰D$pH‰D$èH‹\$H‰\$xH‹\$H‰œ$€H‹\$ H‰œ$ˆH‹D$(H‹L$0H‰L$@HƒøH‰D$8„FH‰$H‰L$è¶\$€û…*H\$XHÇHÇCH\$XHƒû„HÇÁHÇÂH‰œ$¨H‰Œ$°H‰”$¸H‹\$8H‰$H‹\$@H‰\$èH‹L$H‹D$H‹œ$¨H‰$H‰L$HH‰L$H‰D$PH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$¨H‰\$H‹œ$°H‰\$H‹œ$¸H‰\$ èH‹L$(H‹D$0H‹œ$H‰œ$H‹œ$˜H‰œ$H‹œ$ H‰œ$ H‰Œ$(H‰„$0HÄøÉéùþÿÿH‹\$xH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH\$H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹T$ H‹L$(H‹D$0H‰”$H‰Œ$H‰„$ HÇ„$(HÇ„$0HÄøÉé,ýÿÿ +*0runtime.morestack_noctxt®""".statictmp_1558Ä runtime.duffcopyÖ4runtime.writebarrierstring¬path.Joinð$io/ioutil.ReadFileos.IsNotExistîruntime.convI2EÌ2runtime.writebarrierifaceÚZgo.string."error reading .dockerignore: '%s'"Î fmt.ErrorfÐ 2runtime.slicebytetostring„ go.string."\n"¬ strings.Splitpð"".autotmp_1562ß"type.interface {}"".autotmp_1560Ÿ&type.[]interface {}"".autotmp_1557otype.[]string"".autotmp_1556type.[]string"".autotmp_1554¿(type.[1]interface {}"".autotmp_1552Ÿtype.string"".autotmp_1551?type.[2]string "".errÿtype.error"".ignoreÿtype.[]uint8"".excludesÏtype.[]string "".~r2Ptype.error "".~r1 type.[]string"".roottype.string("ðšïð£ïðð(Ú^$í+ªe8 $ê+rop ‰Tgclocals·2f519926ed4d9241bccfb3ede7c3f0baTgclocals·738657360054077c9c40a6546341a136æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.goþ:"".(*tlsClientCon).CloseWrite€æeH‹ 
%H;awèëêHƒì@HÇD$PHÇD$XHH‰$H‹|$HHƒÿtmHoH|$H‰îH¥H¥èH‹T$H‹L$ ¶\$(€ût-H‰L$8H‰ $H‰T$0H‹Z ÿÓH‹L$H‹D$H‰L$PH‰D$XHƒÄ@ÃHÇD$PHÇD$XHƒÄ@Éë + + 0runtime.morestack_noctxt^Jtype.interface { CloseWrite() error }¦$runtime.assertI2I2ú +0€ "".cwcJtype.interface { CloseWrite() error } "".~r0type.error"".c*type.*"".tlsClientCon €}€€À0, ++- +RnTgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goþ("".tlsDialWithDialer€öeH‹ %H„$pÿÿÿH;AwèëâHìH‹œ$HÇ„$HHÇ„$PHÇ„$XHÇ„$`H‹+H‰l$HH‹K‹C‰„$èH‹kH‰¬$ðH‰Œ$àHƒù…ãƒø…ÚHÇÀ<u|èH‹$‹L$H‹D$H‹¼$Hƒÿ„¤HoH<$H‰îH¥H¥H¥H‰”$øH‰T$‰Œ$‰L$ H‰„$H‰D$(èH‹L$HH‹D$0Hƒù„KH9ÈŒBHH‰$èH‹\$H‰\$xH‹\$HHƒû„HH‰$HÇD$èH‹D$H‹\$xH‰$H‰D$èHH‰$èH‹D$H-H‰(H‰D$pH‰$Hƒ<$„©Hƒ$H‹\$xH‰\$èH‹\$HH‰$H‹\$pH‰\$èH‹œ$H‰$H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$H‹œ$8H‰\$ èH‹\$(H‰œ$H‹\$0H‰œ$˜H‹D$8H‹L$@H‰Œ$¸HƒøH‰„$°t0HÇ„$HHÇ„$PH‰„$XH‰Œ$`HÄÃH‹œ$0H‰$H‹´$8H‰t$H5Hl$H‰ïH¥H¥èH‹Œ$8H‹D$ HƒøÿuH‰ÈH9Á‚wH‹Œ$0H‰Œ$ H‰„$¨H‹œ$@H‹[hHƒû…‹HH‰$èH‹D$HH‰$H‰„$ˆH‰D$H‹œ$@H‰\$Hƒ|$„÷èH‹œ$ˆH‰$Hƒ$`H‹œ$ H‰\$H‹œ$¨H‰\$èH‹œ$ˆH‰œ$@HH‰$èH‹\$H‰œ$€H‹œ$H‰œ$ÀH‹œ$˜H‰œ$ÈH‹œ$@H‰\$PHH‰$èH‹L$H‰ÏHƒù„?1ÀèH‰L$hH‰ $Hƒ<$„H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹\$hH‰$Hƒ<$„ÙHƒ$8H‹\$PH‰\$èH‹D$hHÇÅ@ˆhH‹œ$€H‰$H‰D$èH‹\$HHƒû…˜H‹œ$€H‹+H‰,$èH‹L$H‹D$H‰„$¸HƒùH‰Œ$°tZH‹œ$˜H‰$H‹œ$H‹[ ÿÓHÇ„$HHÇ„$PH‹œ$°H‰œ$XH‹œ$¸H‰œ$`HÄÃHH‰$èH‹D$H‰D$XH‰$Hƒ<$„ÕH‹œ$€H‹+H‰l$èH‹\$XH‰$Hƒ<$„ Hƒ$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$XH‰\$XH‹1íH9èt5H‹\$XH‰œ$PH‰„$HHÇ„$XHÇ„$`HÄÃHH‰$HH‰\$HH‰\$èH‹D$뜉%éTÿÿÿ‰%éÿÿÿHH‰$èH‹D$H-H‰(H‰D$`H‰$Hƒ<$„¹Hƒ$H‹\$xH‰\$èH‹\$`H‰$Hƒ<$„…Hƒ$H‹œ$€H‰\$èH‹\$`SjèYYHÇ„$ÐHÇ„$ØHH‰$H‹\$xH‹+H‰l$Hœ$ÐH‰\$èH‹Œ$ÐH‹œ$ØH‰œ$¸é®ýÿÿ‰%éoÿÿÿ‰%é;ÿÿÿ‰%éýÿÿ‰%éÜüÿÿ‰éºüÿÿ‰%éýûÿÿè ‰%éKúÿÿH‰D$Hé´ùÿÿ‰éUùÿÿ1Àé&ùÿÿd +00runtime.morestack_noctxtÌtime.Now€time.Time.SubÈtype.chan errorÚ"runtime.newobjectštype.chan error¾ runtime.makechanî.runtime.writebarrierptrüRtype.struct { F uintptr; A0 *chan error }Ž"runtime.newobject¦"".func·009ü.runtime.writebarrierptr¬time.AfterFunc¶$net.(*Dialer).DialÊ +go.string.":"ì 
+"strings.LastIndex” ,type.crypto/tls.Config¦ "runtime.newobject¾ ,type.crypto/tls.Configœ .runtime.writebarrierfatü 4runtime.writebarrierstringª*type.*crypto/tls.Conn¼"runtime.newobject¾(type.crypto/tls.ConnÐ"runtime.newobject‚´ runtime.duffzeroè2runtime.writebarrieriface¸.runtime.writebarrierptr„.runtime.writebarrierptrÊ8crypto/tls.(*Conn).Handshake +Ì(type."".tlsClientConÞ"runtime.newobjectº.runtime.writebarrierptrª2runtime.writebarrierifaceÌBgo.itab.*"".tlsClientCon.net.ConnÒ*type.*"".tlsClientConètype.net.Conn€Bgo.itab.*"".tlsClientCon.net.Conn” runtime.typ2Itabà~type.struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }ò"runtime.newobjectŠ"".func·010à.runtime.writebarrierptr¶.runtime.writebarrierptrÐruntime.newproc’type.chan errorØ"runtime.chanrecv1¢$runtime.panicslice  2"".autotmp_1585ï*type.*"".tlsClientCon"".autotmp_1584߀type.*struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }"".autotmp_1583Ï*type.*crypto/tls.Conn"".autotmp_1582*type.*crypto/tls.Conn"".autotmp_1579¿Ttype.*struct { F uintptr; A0 *chan error }"".autotmp_1577*type.*"".tlsClientCon"".autotmp_1576type.error"".autotmp_1574type.int"".autotmp_1570/type.time.Time"".&connŸ,type.**crypto/tls.Conn"".&errChannel¯ type.*chan error +"".&c.type.*crypto/tls.Config(crypto/tls.config·3ÿ.type.*crypto/tls.Config$crypto/tls.conn·2Ÿtype.net.Conntime.t·2_type.time.Time"".hostnameßtype.string "".err¿type.error"".rawConnÿtype.net.Conn"".timeout$type.time.Duration "".~r5€type.error "".~r4`type.net.Conn"".configP.type.*crypto/tls.Config"".addr0type.string"".networktype.string"".dialer type.*net.DialerJ% ãŸ ™Ÿ ÂŸ ¿Ò€¤F]@i +1_q0>!K0ô&@ +ŠZ      j¥¹_à];0 JL(&#=M.8¤ 78De0Tgclocals·0389232f9bf0423206204d8b27e58130Tgclocals·a6f85fd4ba75b8cdab35ab28e50023d9æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goþ"".tlsDialÀ¦eH‹ %H;awèëêHƒìPHÇ„$€HÇ„$ˆHÇ„$HÇ„$˜HH‰$èH‹\$H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ 
H‹\$xH‰\$(èH‹l$0H‹T$8H‹L$@H‹D$HH‰¬$€H‰”$ˆH‰Œ$H‰„$˜HƒÄPà + + 0runtime.morestack_noctxtštype.net.Dialer¬"runtime.newobject¬("".tlsDialWithDialer  + "".~r4ptype.error "".~r3Ptype.net.Conn"".config@.type.*crypto/tls.Config"".addr type.string"".networktype.string ¸ŸàÆJ– U‹Tgclocals·d85453ba2fc2b16513844b65495ea6c3Tgclocals·3280bececceccd33cb74587feedb1f9fæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goþ"".func·001    eH‹ %HD$èH;AwèëåHì˜H‹JH‹ZH‰\$XH‹ZH‰\$PH‹Z H‰\$`H‹)H‰,$H QjèYYH…À…àH‹\$XH‹+H‰,$H QjèL‹D$`H‹T$pYYH…À…¢HÇD$hHÇD$pA¶X€û„àH‹*H‰l$HH‹1íH9è„Ih H$H‰ßH‰îH¥H¥H‹L$HH‰D$xH‰D$H‰Œ$€H‰L$èH‹T$(H‹L$0H‰T$hH‰”$ˆH‰L$pH‰Œ$HH‰$H‹\$XH‹+H‰l$Hœ$ˆH‰\$èèHĘÃHH‰$HH‰\$HH‰\$èL‹D$PH‹D$é:ÿÿÿH‹*H‰l$HH‹ 1íH9étUIh H$H‰ßH‰îH¥H¥Ih0H\$H‰ßH‰îH¥H¥H‹T$HH‰L$xH‰L$ H‰”$€H‰T$(èH‹T$8H‹L$@é ÿÿÿHH‰$HH‰\$HH‰\$èL‹D$PH‹L$étÿÿÿèHĘÃèHĘÃ. +*"runtime.morestack–(runtime.closechan·f¦"runtime.deferprocâ(runtime.closechan·fò"runtime.deferprocú>go.itab.*bufio.Reader.io.Readeröio.CopyÌtype.chan error’"runtime.chansend1ž&runtime.deferreturn¼$type.*bufio.ReaderÒtype.io.Readerê>go.itab.*bufio.Reader.io.Readerþ runtime.typ2Itabº>go.itab.*bufio.Reader.io.ReaderÔ®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdCopy€$type.*bufio.Reader–type.io.Reader®>go.itab.*bufio.Reader.io.Reader runtime.typ2Itabì&runtime.deferreturnˆ &runtime.deferreturn°"".autotmp_1597type.*uint8"".autotmp_1595type.error"".autotmp_1594$type.*bufio.Reader"".autotmp_1593Ÿ$type.*bufio.Reader "".&bro&type.**bufio.Reader""".&hijackOptions,type.*"".hijackOptions"".&errChanOut type.*chan error "".err_type.errorN"°.ѯ°æ¯° ¯Ð<Ò A!0WD 7f7 (R¨N1k7Tgclocals·7c13896baab3273e10662a9a37b348ceTgclocals·51d2fd2674ba9ccfd7abd80151d2e032ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".func·002À¢eH‹ 
%H;awèëêHì€H‹jH‹JH‰L$8H‹BH‰D$HH‰l$@Hƒ}„ØHH‰$Hl$H‰ïH‰ÎH¥H¥èH\$H,$H‰ïH‰ÞH¥H¥H‹|$@HoH|$H‰îH¥H¥èH‹L$(H‹D$0H‰L$PH‰L$pH‰D$XH‰D$xHH‰$H‹\$HH‹+H‰l$H\$pH‰\$èHH‰$H‹t$8Hl$H‰ïH¥H¥èH‹L$H‹D$ H‰D$hH‰$H‰L$`H‹Y ÿÓHÄ€ÃHÇD$pHÇD$xHH‰$H‹(H‰l$H\$pH‰\$èëƒ + "runtime.morestackŒtype.io.Writer¼runtime.convI2I–io.Copyàtype.chan error "runtime.chansend1®Jtype.interface { CloseWrite() error }â"runtime.assertI2I¢ +àtype.chan error–"runtime.chansend1€ "".autotmp_1603type.error"".autotmp_1602type.error"".&errChanIno type.*chan error"".&rwctype.*net.Conn""".&hijackOptions,type.*"".hijackOptions "".err_type.error€ýÿ€Eà$ê 3V; ? 4]-E!ZTgclocals·0372b889336bbdf612862c172920463dTgclocals·2b592d649ecec7c5b5fac74b8e09bee8ì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goþ"".func·003  eH‹ %H;awèëêHƒìhH‹BH‹ZH‰\$0H‹ZH‰\$ H‹Z H‰\$(H‹hH‰,$èHÇD$XHÇD$`H-H‰,$Hl$XH‰l$H‹l$0L‹EL‰D$èH‹T$ ¶\$€ût3H‹D$XH‹L$`H‰L$@HƒøH‰D$8tHƒ:uH‰$H‰D$H‰L$èH‹\$(H‹+H‰,$èH‹T$ H‹D$H‹L$Hƒøt#Hƒ:uH‰$H‰D$HH‰D$H‰L$PH‰L$èHƒÄhà + "runtime.morestack„"runtime.closechan¶type.chan errorø(runtime.selectnbrecvü2runtime.writebarrierifacež,io.(*PipeReader).CloseŽ2runtime.writebarrierifaceÐ "".autotmp_1605type.error"".&readCloser(type.**io.PipeReader"".&retErrtype.*error"".&errCo type.*chan error "".err?type.error "".err_type.errorÐõÏ$ú +9 X ,A:B8 +Tgclocals·7c13896baab3273e10662a9a37b348ceTgclocals·50e42ec547586bf00be346cef54257daò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".func·004àÎeH‹ %H„$ðþÿÿH;AwèëâHìH‹ZH‰œ$¨H‹rH‹ZH‰œ$H‹Z H‰œ$ H¼$(1ÀèHœ$(Hƒû„2HÇÅHÇÂH‰œ$H‰¬$H‰”$ HH‰$H‰´$˜H‰t$èH‹T$H‹D$H‹œ$H‰$H‰”$àH‰T$H‰„$èH‰D$èHH‰$H‹œ$˜H‰\$HƒD$èH‹T$H‹D$H‹œ$HƒÃH‰$H‰”$àH‰T$H‰„$èH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$ H‰\$ èH‹\$(H‰œ$H‹\$0H‰œ$H‹œ$H‹+H‰¬$ˆH¼$H1ÀèHÇÈœ$IHÇÈœ$JH‹1íH9è„H‹”$ˆH‰„$ÐH‰„$hH‰”$ØH‰”$pH‹¬$˜H‹](H‰œ$ˆH‹´$¨H‹>H‰<$H5H|$H¥H¥H‹œ$H‰\$H‹´$H‰t$ H´$HHl$(H‰ïèèH‹D$pH‹L$xH‰Œ$¸HƒøH‰„$°„ÓHH‰$H‰D$H‰L$èH‹L$¶\$ 
€û„¦H‹Hû”…–HH‰$èH‹L$H‰ÏHƒù„h1ÀèH‰Œ$€H‰ $Hƒ<$„>H‹¼$˜H/H|$H‰îH¥H¥èH‹œ$€H‰œ$€H‹1íH9è„ÎH‹œ$€H‰œ$¸H‰„$°H‹œ$H‹+H‰,$èH‹”$°H‹D$H‹L$H‰Œ$ÈHƒøH‰„$ÀtHƒúu H‰ÂH‰Œ$¸H‰”$°H‰”$ðH‹œ$¸H‰œ$øHH‰$H‹œ$ H‹+H‰l$Hœ$ðH‰\$èH‹œ$ H‹+H‰,$èHÄÃHH‰$HH‰\$HH‰\$èH‹D$éÿÿÿ‰%é¶þÿÿ‰é‘þÿÿHH‰$HH‰\$HH‰\$èH‹D$éMýÿÿ‰éÇûÿÿF +0"runtime.morestack°ð runtime.duffzero®type.stringÚruntime.convT2EÄ2runtime.writebarrierifaceÒtype.boolŠruntime.convT2Eü2runtime.writebarrierifaceŠTgo.string."/containers/%s/stats?stream=%v"þfmt.SprintföÜ runtime.duffzero¼@go.itab.*io.PipeWriter.io.Writerögo.string."GET"æ „ runtime.duffcopyð &"".(*Client).streamÆ +type.*"".Errorì +$runtime.assertI2T2À .type."".NoSuchContainerÒ "runtime.newobject„ ð runtime.duffzeroê 4runtime.writebarrierstring˜ Bgo.itab.*"".NoSuchContainer.error†,io.(*PipeWriter).CloseÆtype.chan error’"runtime.chansend1º"runtime.closechanØ0type.*"".NoSuchContainerîtype.error†Bgo.itab.*"".NoSuchContainer.errorš runtime.typ2Itabâ&type.*io.PipeWriterøtype.io.Writer@go.itab.*io.PipeWriter.io.Writer¤ runtime.typ2Itab $"".autotmp_1621type.*uint8"".autotmp_1620Ÿ0type.*"".NoSuchContainer"".autotmp_1618*type."".streamOptions"".autotmp_1617"type.interface {}"".autotmp_1616ß"type.interface {}"".autotmp_1614ÿ&type.[]interface {}"".autotmp_1613¿type.error"".autotmp_1612type.error"".autotmp_16110type.*"".NoSuchContainer"".autotmp_1609&type.*io.PipeWriter"".autotmp_1608Ÿtype.string"".autotmp_1607Ï(type.[2]interface {}"".&errCß type.*chan error"".&writeCloserÿ(type.**io.PipeWriter"".&optsï*type.*"".StatsOptions +"".&cÏ type.**"".Client"".closeErrŸtype.error "".err¿type.error% ƒŸ ‡° \œ MÐ+< +]  $ –B ME -:¬Òù>3LN†0ETgclocals·fb05dbbfacbbe47b8b1eb4226ce34430Tgclocals·df3c8560fdbead80e4ddce1ccdbd1147ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".func·005€îeH‹ %HD$¸H;AwèëåHìÈH‹BH‹ZH‰\$(H‹ZH‰\$0H‹h H‰l$ H|$81ÀèH\$8H‰$HÇD$ÇD$èHl$8H‰,$H‹l$ H‰l$HÇD$è¶\$€ûtH‹\$(H‹+H‰,$èHÄÈÃHl$8H‰,$H‹l$0L‹EL‰D$HÇD$è¶\$€ûtHÄÈÃH\$8H‰$è  
+*"runtime.morestack’¸ runtime.duffzeroÐ"runtime.newselect’$runtime.selectrecvÈ,io.(*PipeReader).Close¢$runtime.selectrecvâ runtime.selectgo"".autotmp_1629Ÿìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [2]*uint8; pollorderarr [2]uint16 }"".autotmp_1628Ï type.<-chan bool"".&quit¯(type.*chan struct {}"".&readCloser¿(type.**io.PipeReader&"6€(Ì 8 ++/g!-0Tgclocals·73423680ca5f2d7df4fe760a82d507fbTgclocals·f34a2133376bc2e71ee31cc35164f3d3ò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goþ"".func·006 ŽeH‹ %H;awèëêHƒìH‹JH‹BH‹)H‰,$H‹(H‰l$H QjèYYHƒÄà + "runtime.morestackhV"".(*eventMonitoringState).monitorEvents·fxruntime.newproc   +P‚P +;Tgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·3280bececceccd33cb74587feedb1f9fê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ"".func·007àÊeH‹ %HD$¸H;AwèëåHìÈH‹ZH‰\$XH‹ZH‰\$`H‹ZH‰\$@H‹Z H‰\$PH‹œ$ØH‰$H QjèYYH…À…kH‹œ$ÐHƒû„RH‹S@H‹kHH‰¬$H‰,$H‰”$ˆHƒú„%HZ SjèYYH…À…HH‰$H‹¼$ÐHƒÿ„ÜHo@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$˜H‰L$hH‰„$ H‰D$pHH‰$èH‹L$H‰ÏHƒù„z1ÀèH‰L$0H‰ $Hƒ<$„SH‹\$hH‰\$H‹\$pH‰\$èH‹\$0H‰\$(HH‰$èH‹L$H‰L$HH‹\$(H‰$HH‰D$xH‰D$H‰Œ$€H‰L$èH‹L$H‹D$ H‹\$XH‰$H‰Œ$¸H‰L$H‰„$ÀH‰D$èH‹D$XHƒ8„3H‹H‹-H9ë…’L$L‰ÇH‰ÆH¥H¥H‹-H‰l$H‹-H‰l$èH‹D$X¶\$ €ûtXH‹\$`H‹H‹k0H‰,$è¶\$€ût7HH‰$H‹\$@H‹+H‰l$HH‰\$èèHÄÈÃëðH‹H‹-H9ëu>L$L‰ÇH‰ÆH¥H¥H‹-H‰l$H‹-H‰l$èH‹D$X¶\$ €û…[ÿÿÿH‹(H‰¬$¨H‹hH‰¬$°HH‰$H‹\$PH‹+H‰l$Hœ$¨H‰\$èH‹l$HH‹]0Hƒû„3þÿÿH‹\$`H‹H‹k0H‰,$è¶\$€ûuèHÄÈÃH‹\$HH‰\$8HH‰$H‹\$@H‹+H‰l$H\$8H‰\$èéÐýÿÿ‰%é¡ýÿÿ‰éýÿÿ‰éýÿÿèHÄÈÉéÔüÿÿ‰é§üÿÿèHÄÈÃJ +*"runtime.morestackªPnet/http/httputil.(*ClientConn).Close·fº"runtime.deferprocØ"runtime.deferprocütype.io.ReaderÒruntime.convI2I¨4type.encoding/json.Decoderº"runtime.newobjectìÄ 
runtime.duffzeroÆ2runtime.writebarrierifaceè"type."".APIEventsú"runtime.newobject®$type.*"".APIEventsæ>encoding/json.(*Decoder).DecodeÊ2runtime.writebarrierifaceü io.EOF¸ io.EOFÐ io.EOFäruntime.ifaceeq¬ H"".(*eventMonitoringState).isEnabledÎ .type.chan *"".APIEventsþ "".EOFEvent’ +"runtime.chansend1ž +&runtime.deferreturnÆ +&io.ErrUnexpectedEOFú +&io.ErrUnexpectedEOF’ &io.ErrUnexpectedEOF¦ runtime.ifaceeqˆ type.chan errorÎ "runtime.chansend1ž H"".(*eventMonitoringState).isEnabled¾ &runtime.deferreturnð .type.chan *"".APIEvents°"runtime.chansend1ú&runtime.deferreturn²&runtime.deferreturn  "".autotmp_1638¯6type.*encoding/json.Decoder"".autotmp_16376type.*encoding/json.Decoder"".autotmp_1636_type.io.Reader"".autotmp_1635Ÿ$type.*"".APIEvents"".autotmp_1634type.bool"".autotmp_1633?type.error"".autotmp_1631type.error"".&eventÿ$type.*"".APIEvents$encoding/json.r·2¿type.io.Reader"".&errChanï type.*chan error"".&eventChan0type.*chan *"".APIEvents +"".&cÏ type.**"".Client"".&errßtype.*error"".decoder¿6type.*encoding/json.Decoder"".connDtype.*net/http/httputil.ClientConn "".res.type.*net/http.ResponseZ"8FèÏ] ðV°F&O¶rM)!MA1!>\ÀF6£3 J +T( :$5Tgclocals·8e6ff68ca952ded665cfa894236f9944Tgclocals·48a37d9114fa45f0336e02e754d41f88ê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goþ"".func·008€æeH‹ %HD$¨H;AwèëåHìØH‹rH‹ZH‰\$HHÇ„$HÇ„$H,$H‰ïH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$èH‹l$ H‹T$(H‹D$0H‹L$8H‰L$xHƒøH‰D$ptH‰„$H‰Œ$HÄØÃH‰l$PH‰,$H‰T$XH‰T$H‹t$HHl$H‰ïH¥H¥H¥èH‹´$øH‹¬$ðH‹”$¶\$(H‹D$0H‹|$8H‰|$hHƒøH‰D$`tH‰„$H‰¼$HÄØÀûtZH‰4$H‹] ÿÓ¶\$€ût&H‹H‰œ$H‹H‰œ$HÄØÃHÇ„$HÇ„$HÄØÃHƒú„¿H‰$H‹œ$H‰\$è¶\$€û„+H‹œ$àH‰œ$ H‹œ$èH‰œ$¨Hœ$°HÇHÇCHœ$°Hƒû„ÛHÇÂHÇÁH‰œ$ÀH‰”$ÈH‰Œ$ÐHH‰$Hœ$ H‰\$èH‹L$H‹D$H‹œ$ÀH‰$H‰Œ$H‰L$H‰„$˜H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ÀH‰\$H‹œ$ÈH‰\$H‹œ$ÐH‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$HÄØÉéÿÿÿH‹œ$H‰$H‹œ$H‰\$è¶\$€ût HÇ„$HÇ„$HÄØÃH‹œ$H‰œ$H‹œ$H‰œ$HÄØÃH‰4$H‹]0ÿÓ‹\$ã +ƒût HÇ„$HÇ„$HÄØÃH‹œ$øH‰$H‹œ$ðH‹[ ÿÓ¶\$€û…ÁH‹œ$àH‰$H‹œ$èH‰\$èH‹\$H‰\$@H‹D$H‹L$ 
H‰Œ$ˆHƒøH‰„$€„GH‰$H‰L$è¶\$€û„+H‹œ$àH‰œ$ H‹œ$èH‰œ$¨Hœ$°HÇHÇCHœ$°Hƒû„ÛHÇÁHÇÂH‰œ$ÀH‰Œ$ÈH‰”$ÐHH‰$Hœ$ H‰\$èH‹L$H‹D$H‹œ$ÀH‰$H‰Œ$H‰L$H‰„$˜H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ÀH‰\$H‹œ$ÈH‰\$H‹œ$ÐH‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$HÄØÉéÿÿÿH‹\$@H‰$èHÇ„$HÇ„$HÄØÃëÞ0 +*"runtime.morestackÚ"path/filepath.Rel¨²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.Matchesè +†*path/filepath.SkipDir¤*path/filepath.SkipDirÄos.IsPermissionÌtype.stringøruntime.convT2Eâ 2runtime.writebarrierifaceð 6go.string."can't stat '%s'"ä +fmt.Errorfò os.IsNotExist® +À +”os.OpenŒos.IsPermission”type.stringÀruntime.convT2Eª2runtime.writebarrieriface¸Vgo.string."no permission to read from '%s'"¬fmt.Errorfš os.(*File).Close€°."".autotmp_1659"type.interface {}"".autotmp_1658*type.*[1]interface {}"".autotmp_1657&type.[]interface {}"".autotmp_1656"type.interface {}"".autotmp_1654/&type.[]interface {}"".autotmp_1653type.error"".autotmp_1652type.string"".autotmp_1651(type.[1]interface {}"".autotmp_1649type.bool"".autotmp_1647type.bool"".autotmp_1645otype.string"".autotmp_1644O(type.[1]interface {}"".autotmp_1643type.bool"".&excludesŸtype.*[]string "".err¯type.error"".currentFile¯type.*os.File "".errïtype.error "".errÏtype.error"".relFilePathtype.string "".~r3`type.error "".err@type.error"".f  type.os.FileInfo"".filePathtype.stringŠ"°Š¯°y¯°>¯°¯°Ñ¯°N¯°'¯°8¯°³¯°4¯°€ dŠGNb&  +$«( ( + (:.«/0lgèvGšv74Tgclocals·0e5d6e03d8b052993869281db2167ff7Tgclocals·8638ac1ded2e05617036c77f7600dfacæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.goþ"".func·009àÖeH‹ %H;awèëêHƒìpH‹ZH‰\$(HÇD$@HÇD$HHÇD$0HÇD$8HH‰$èH‹D$H‰D$ H‰$Hƒ<$„¯H‹\$@H‰\$H‹\$HH‰\$èH‹\$ H‰\$ H‹1íH9ètOH‹L$ H‰D$PH‰L$XH‰D$0H‰D$`H‰L$8H‰L$hHH‰$H‹\$(H‹+H‰l$H\$`H‰\$èHƒÄpÃHH‰$HH‰\$HH‰\$èH‹D$낉%éEÿÿÿ + "runtime.morestack”.type.errors.errorString¦"runtime.newobjectŠ4runtime.writebarrierstring¬Bgo.itab.*errors.errorString.errorŽtype.chan 
errorÎ"runtime.chansend1æ0type.*errors.errorStringütype.error”Bgo.itab.*errors.errorString.error¨ runtime.typ2Itabà"".autotmp_1669?type.error"".autotmp_1668Ÿ0type.*errors.errorString"".autotmp_1667type.error"".autotmp_16660type.*errors.errorString "".~r0type.errorerrors.text·2_type.string"".&errChannel type.*chan erroràÕßà@°h#È@R2b-Tgclocals·0372b889336bbdf612862c172920463dTgclocals·0730e324c95d53ccaec07bf254f1f516æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goþ"".func·010€æeH‹ %H;awèëêHƒì0H‹ZH‰\$H‹ZH‹+H‰,$èH‹\$H‰\$ H‹\$H‰\$(HH‰$H‹\$H‹+H‰l$H\$ H‰\$èHƒÄ0à + + "runtime.morestack^8crypto/tls.(*Conn).Handshake”type.chan errorÔ"runtime.chansend1`"".autotmp_1672type.error"".&errChannel/ type.*chan error`X_€¤'G.;Tgclocals·73423680ca5f2d7df4fe760a82d507fbTgclocals·f1ce4f14231620ac9cd58e5cd8e6fa2dæ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goþ"".initÀ¨eH‹ %H;awèëêHƒì`¶€ût¶€ûuHƒÄ`Ãè ÆèèèèèèèèèèèèèèèèèèèèèèèèèèèèHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹T$H‹L$H‹D$ HH‰$H‰T$HH‰T$H‰L$PH‰L$H‰D$XH‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$8H‰L$H‰D$@H‰D$èÆHƒÄ`î + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·Œ¬github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.init–¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.init 
sync.initªmath.init´¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.init¾¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.initÈšgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.initÒtime.initÜstrconv.initæruntime.initðreflect.initú$path/filepath.init„net/url.initŽ,net/http/httputil.init˜net/http.init¢net.init¬io/ioutil.init¶ crypto/x509.initÀcrypto/tls.initÊbufio.initÔstrings.initÞpath.initèos.initòio.initüfmt.init†$encoding/json.init(encoding/base64.initšbytes.init¨pgo.string."Failed to read authentication from dockercfg"Îerrors.Newð""".AuthParseErrorª2runtime.writebarrieriface¸8go.string."invalid endpoint"Þerrors.New€*"".ErrInvalidEndpointº2runtime.writebarrierifaceÈZgo.string."cannot connect to Docker endpoint"îerrors.New."".ErrConnectionRefusedÊ2runtime.writebarrierifaceØ go.string."1.12"þ "".NewAPIVersionª "".apiVersion112ø2runtime.writebarrierslice†Hgo.string."container already exists"¬errors.NewÎ8"".ErrContainerAlreadyExistsˆ 2runtime.writebarrieriface– bgo.string."no listeners present to receive event"¼ errors.NewÞ """.ErrNoListeners˜ +2runtime.writebarrieriface¦ +jgo.string."listener already exists for docker events"Ì +errors.Newî +6"".ErrListenerAlreadyExists¨ 2runtime.writebarrieriface¶ 2go.string."no such image"Ü errors.Newþ """.ErrNoSuchImage¸ 2runtime.writebarrierifaceÆ ‚go.string."missing remote repository e.g. 
'github.com/user/repo'"ì errors.NewŽ """.ErrMissingRepoÈ 2runtime.writebarrierifaceÖ Bgo.string."missing output stream"ü errors.Newž2"".ErrMissingOutputStreamØ2runtime.writebarrierifaceæšgo.string."image build may not be provided BOTH context dir and input stream"Œerrors.New®,"".ErrMultipleContextsè2runtime.writebarrierifaceöhgo.string."must specify at least one name to export"œerrors.New¾,"".ErrMustSpecifyNamesø2runtime.writebarrieriface†Dgo.string."network already exists"¬errors.NewÎ4"".ErrNetworkAlreadyExistsˆ2runtime.writebarrieriface”"".initdone·À"".autotmp_1686/$type."".APIVersion"".autotmp_1685type.error"".autotmp_1684type.error"".autotmp_1683type.error"".autotmp_1682type.error"".autotmp_1681type.error"".autotmp_1680type.error"".autotmp_1679type.error"".autotmp_1678type.error"".autotmp_1677type.error"".autotmp_1676type.error"".autotmp_1675type.error"".autotmp_1674Otype.error À¿ÀÜ¿ *ÑHçHèH HÊÑ¡H.HHW1H4HHDHHHHH«H¨ 7éTgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·7b2d1dc8e692ba633cb2c876407e20f2æ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.goè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goì/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.goò/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.goê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.goê/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.goî/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.goþ>type..hash."".AuthConfigurationÀ²eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„ßHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$ 
HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$0HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ + 0runtime.morestack_noctxt„runtime.strhashðruntime.strhashÔruntime.strhash¸runtime.strhash@@"".autotmp_1690type.uintptr"".autotmp_1689type.uintptr"".autotmp_1688type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p4type.*"".AuthConfiguration@Ô?@1   AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ:type..eq."".AuthConfigurationàØeH‹ %H;awèëêHƒìHH‹\$PHƒû„üH‹3H‹KH‹\$XHƒû„ßH‹H‹CH9Á…ÅH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„‹H‹\$PHƒû„uH‹SH‹CH‹\$XHƒû„WH‹sH‹KH9È…<H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €û„H‹\$PHƒû„ìH‹s H‹K(H‹\$XHƒû„ÎH‹S H‹C(H9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹S0H‹C8H‹\$XHƒûtWH‹s0H‹K8H9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé+ÿÿÿ‰é ÿÿÿÆD$hHƒÄHÉé¢þÿÿ‰é„þÿÿÆD$hHƒÄHÉéþÿÿ‰éýýÿÿ + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring² runtime.eqstringô runtime.eqstring@"".autotmp_1698type.string"".autotmp_1697type.string"".autotmp_1696type.string"".autotmp_1695type.string"".autotmp_1694type.string"".autotmp_1693type.string"".autotmp_1692?type.string"".autotmp_1691type.string "".~r30type.bool"".s type.uintptr"".q4type.*"".AuthConfiguration"".p4type.*"".AuthConfigurationJ· °° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash.[8]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_1701type.int"".autotmp_1700type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[8]string`ˆ_` °° 
+}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq.[8]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_1705?type.string"".autotmp_1704type.string"".autotmp_1703_type.int"".autotmp_1702Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[8]string"".ptype.*[8]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..hash.[8]"".AuthConfigurationàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkí@HëH‰$HÇD$@H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü>type..hash."".AuthConfiguration@` "".autotmp_1708type.int"".autotmp_1707type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p:type.*[8]"".AuthConfiguration`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ@type..eq.[8]"".AuthConfigurationÀ +¦ +eH‹ %H;awèëêHƒìh1ÀHÇD$(H‹l$(H9èH‰D$0H‹L$pHƒù„EH‹\$xH‰ÅHkí@HéHƒû„%H‰ÅHkí@HëH‰L$@Hƒù„H‹1H‹IH‰\$8Hƒû„èH‹H‹CH9Á…¤H‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„jH‹\$@Hƒû„ˆH‹SH‹CH‹\$8Hƒû„jH‹sH‹KH9È…3H‰T$HH‰$H‰D$PH‰D$H‰t$XH‰t$H‰L$`H‰L$è¶\$ €û„ùH‹\$@Hƒû„ H‹s H‹K(H‹\$8Hƒû„ëH‹S H‹C(H9Á…ÂH‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„ˆH‹\$@Hƒû„ŠH‹S0H‹C8H‹\$8HƒûtsH‹s0H‹K8H9ÈuYH‰T$HH‰$H‰D$PH‰D$H‰t$XH‰t$H‰L$`H‰L$è¶\$ €ût#H‹D$0HÿÀH‹l$(H9èŒýýÿÿÆ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ뉉éoÿÿÿ‰éÿÿÿ‰éðþÿÿ‰éþÿÿ‰éqþÿÿ‰éþÿÿ‰éôýÿÿ‰éÔýÿÿ‰é´ýÿÿ + 0runtime.morestack_noctxtŽ 
runtime.eqstringð runtime.eqstringÒ runtime.eqstring¤ runtime.eqstring@Ð "".autotmp_1720type.string"".autotmp_1719type.string"".autotmp_1718type.string"".autotmp_1717type.string"".autotmp_1716type.string"".autotmp_1715type.string"".autotmp_1714?type.string"".autotmp_1713type.string"".autotmp_1712_4type.*"".AuthConfiguration"".autotmp_1711O4type.*"".AuthConfiguration"".autotmp_1710type.int"".autotmp_1709otype.int "".~r30type.bool"".s type.uintptr"".q:type.*[8]"".AuthConfiguration"".p:type.*[8]"".AuthConfiguration&ШÏÐ ÏÐP  ÆËTgclocals·fa7203fd5ed88aea99b7be572f707eb0Tgclocals·65526a5f07004f02424fe51b799cdd23è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash.[3]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_1723type.int"".autotmp_1722type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[3]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq.[3]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_1727?type.string"".autotmp_1726type.string"".autotmp_1725_type.int"".autotmp_1724Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[3]string"".ptype.*[3]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash.[2]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ 
H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_1730type.int"".autotmp_1729type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[2]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq.[2]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_1734?type.string"".autotmp_1733type.string"".autotmp_1732_type.int"".autotmp_1731Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[2]string"".ptype.*[2]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash."".dockerConfigÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.strhash@@ +"".autotmp_1736type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*"".dockerConfig@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq."".dockerConfigÀ´eH‹ %H;awèëêHƒìHH‹\$PHƒû„êH‹3H‹KH‹\$XHƒû„ÍH‹H‹CH9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹SH‹CH‹\$XHƒûtWH‹sH‹KH9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé,ÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring° 
runtime.eqstring@"".autotmp_1740type.string"".autotmp_1739type.string"".autotmp_1738?type.string"".autotmp_1737type.string "".~r30type.bool"".s type.uintptr"".q*type.*"".dockerConfig"".p*type.*"".dockerConfig2Õ    vªTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ:type..hash.[8]"".dockerConfigàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkí HëH‰$HÇD$ H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü4type..hash."".dockerConfig@` "".autotmp_1743type.int"".autotmp_1742type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p0type.*[8]"".dockerConfig`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ6type..eq.[8]"".dockerConfigÀªeH‹ %H;awèëêHƒìh1ÀHÇD$(H‹l$(H9è!H‰D$0H‹L$pHƒù„GH‹\$xH‰ÅHkí HéHƒû„'H‰ÅHkí HëH‰L$@Hƒù„H‹1H‹IH‰\$8Hƒû„êH‹H‹CH9Á…ÂH‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„ˆH‹\$@Hƒû„ŠH‹SH‹CH‹\$8HƒûtsH‹sH‹KH9ÈuYH‰T$HH‰$H‰D$PH‰D$H‰t$XH‰t$H‰L$`H‰L$è¶\$ €ût#H‹D$0HÿÀH‹l$(H9èŒßþÿÿÆ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ뉉éoÿÿÿ‰éÿÿÿ‰éòþÿÿ‰éÒþÿÿ‰é²þÿÿ + 0runtime.morestack_noctxtŽ runtime.eqstringà runtime.eqstring@Ð"".autotmp_1751type.string"".autotmp_1750type.string"".autotmp_1749?type.string"".autotmp_1748type.string"".autotmp_1747_*type.*"".dockerConfig"".autotmp_1746O*type.*"".dockerConfig"".autotmp_1745type.int"".autotmp_1744otype.int "".~r30type.bool"".s type.uintptr"".q0type.*[8]"".dockerConfig"".p0type.*[8]"".dockerConfig&ÐÆÏÐ ÏÐ2  ÆiqTgclocals·fa7203fd5ed88aea99b7be572f707eb0Tgclocals·65526a5f07004f02424fe51b799cdd23è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..hash."".APIEventsÀ²eH‹ %H;awèëêHƒì 
H‹\$(H‰$Hƒ<$„ßHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$0HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ + 0runtime.morestack_noctxt„runtime.strhashðruntime.strhashÔruntime.strhash¸runtime.memhash@@"".autotmp_1755type.uintptr"".autotmp_1754type.uintptr"".autotmp_1753type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p$type.*"".APIEvents@Ô?@1   AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ*type..eq."".APIEvents žeH‹ %H;awèëêHƒìHH‹\$PHƒû„ŸH‹3H‹KH‹\$XHƒû„‚H‹H‹CH9Á…hH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„.H‹\$PHƒû„H‹SH‹CH‹\$XHƒû„úH‹sH‹KH9È…ßH‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €û„¥H‹\$PHƒû„H‹s H‹K(H‹\$XHƒûtxH‹S H‹C(H9ÁuaH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût+H‹l$PH‹]0L‹D$XI‹h0H9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉ넉éjÿÿÿÆD$hHƒÄHÉéÿþÿÿ‰éáþÿÿÆD$hHƒÄHÉéwþÿÿ‰éZþÿÿ + + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring¢ runtime.eqstring@"".autotmp_1761type.string"".autotmp_1760type.string"".autotmp_1759type.string"".autotmp_1758type.string"".autotmp_1757?type.string"".autotmp_1756type.string "".~r30type.bool"".s type.uintptr"".q$type.*"".APIEvents"".p$type.*"".APIEventsJå  ÐÐ vÚTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ>"".(*eventMonitoringState).Lock@@H‹\$H‰\$Hƒ|$té‰%ëò&(sync.(*RWMutex).Lock""..this:type.*"".eventMonitoringState   Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþ@"".(*eventMonitoringState).RLock@@H‹\$H‰\$Hƒ|$té‰%ëò&*sync.(*RWMutex).RLock""..this:type.*"".eventMonitoringState   
Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþD"".(*eventMonitoringState).RLocker€dHÇD$HÇD$H‹\$H‰\$Hƒ|$té‰%ëòJ.sync.(*RWMutex).RLocker0 "".~r1 type.sync.Locker""..this:type.*"".eventMonitoringState@@@Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþD"".(*eventMonitoringState).RUnlock@@H‹\$H‰\$Hƒ|$té‰%ëò&.sync.(*RWMutex).RUnlock""..this:type.*"".eventMonitoringState   + Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþB"".(*eventMonitoringState).Unlock@@H‹\$H‰\$Hƒ|$té‰%ëò&,sync.(*RWMutex).Unlock""..this:type.*"".eventMonitoringState   Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþ<"".(*eventMonitoringState).Add`LH‹\$H‰\$Hƒ|$t HƒD$é‰%ëì2*sync.(*WaitGroup).Add sync.delta·2type.int""..this:type.*"".eventMonitoringState000Tgclocals·cd30d2bcfdea04ed7c49639580b4bd08Tgclocals·3280bececceccd33cb74587feedb1f9fþ>"".(*eventMonitoringState).Done`LH‹\$H‰\$Hƒ|$t HƒD$é‰%ëì2,sync.(*WaitGroup).Done""..this:type.*"".eventMonitoringState000Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþ>"".(*eventMonitoringState).Wait`LH‹\$H‰\$Hƒ|$t HƒD$é‰%ëì2,sync.(*WaitGroup).Wait""..this:type.*"".eventMonitoringState000Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9fþ."".(*APIVersion).String  eH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt+H,$H‰ïH¥H¥H¥èH‹L$H‹D$ H‰L$@H‰D$HHƒÄ0ÉëÑ + 0runtime.morestack_noctxt¢$go.string."docker"Ì,go.string."APIVersion"ø$go.string."String" "runtime.panicwrapÞ("".APIVersion.String0` "".~r0type.string""..this&type.*"".APIVersion`±_`ÐÐ ATgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ2"".(*APIVersion).LessThanÀ¢eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@1íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$@Hƒþt>H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ 
H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8Éë¾ + 0runtime.morestack_noctxt~$go.string."docker"¨,go.string."APIVersion"Ô(go.string."LessThan"ü"runtime.panicwrapö,"".APIVersion.LessThanPp "".~r1@type.bool"".other$type."".APIVersion""..this&type.*"".APIVersionp²opàà +}cTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþD"".(*APIVersion).LessThanOrEqualToÀ¢eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@1íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$@Hƒþt>H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8Éë¾ + 0runtime.morestack_noctxt~$go.string."docker"¨,go.string."APIVersion"Ô:go.string."LessThanOrEqualTo"ü"runtime.panicwrapö>"".APIVersion.LessThanOrEqualToPp "".~r1@type.bool"".other$type."".APIVersion""..this&type.*"".APIVersionp²opàà +}cTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþ8"".(*APIVersion).GreaterThanÀ¢eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@1íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$@Hƒþt>H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8Éë¾ + 0runtime.morestack_noctxt~$go.string."docker"¨,go.string."APIVersion"Ô.go.string."GreaterThan"ü"runtime.panicwrapö2"".APIVersion.GreaterThanPp "".~r1@type.bool"".other$type."".APIVersion""..this&type.*"".APIVersionp²opàà +}cTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþJ"".(*APIVersion).GreaterThanOrEqualToÀ¢eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@1íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$@Hƒþt>H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8Éë¾ + 0runtime.morestack_noctxt~$go.string."docker"¨,go.string."APIVersion"Ô@go.string."GreaterThanOrEqualTo"ü"runtime.panicwrapöD"".APIVersion.GreaterThanOrEqualToPp "".~r1@type.bool"".other$type."".APIVersion""..this&type.*"".APIVersionp²opàà +}cTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþ0"".(*APIVersion).compareÀ¤eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@1íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ 
H‰ïH‰ÞH¥H¥è H‹t$@Hƒþt?H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èH‹\$0H‰\$`HƒÄ8Éë½ + 0runtime.morestack_noctxt~$go.string."docker"¨,go.string."APIVersion"Ô&go.string."compare"ü"runtime.panicwrapö*"".APIVersion.comparePp "".~r1@type.int"".other$type."".APIVersion""..this&type.*"".APIVersionp³opàà +}cTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþLtype..hash."".AttachToContainerOptionsÀ¾eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„¥HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„hHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„&Hƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„äHƒ$0HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$@HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HHÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$PHÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ‰%éÎþÿÿ‰%éŒþÿÿ‰%éOþÿÿ + 0runtime.morestack_noctxt„runtime.strhashð"runtime.interhashÜ"runtime.interhashÈ"runtime.interhash´runtime.memhash˜runtime.memhashüruntime.memhash@@"".autotmp_1774type.uintptr"".autotmp_1773type.uintptr"".autotmp_1772type.uintptr"".autotmp_1771type.uintptr"".autotmp_1770type.uintptr"".autotmp_1769type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".pBtype.*"".AttachToContainerOptions@ö?@Oàà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..eq."".AttachToContainerOptionsàÚeH‹ %HD$øH;AwèëåHìˆH‹œ$Hƒû„rH‹3H‹KH‹œ$˜Hƒû„RH‹H‹CH9Á…2H‰t$xH‰4$H‰Œ$€H‰L$H‰T$hH‰T$H‰D$pH‰D$è¶\$ €û„õH‹œ$˜Hƒû„ÜH‹KH‹sH‹œ$Hƒû„»H‹CH‹SH9È…šH‰D$HH‰$H‰T$PH‰T$H‰L$XH‰L$H‰t$`H‰t$è¶\$ €û„`H‹œ$˜Hƒû„GH‹K H‹s(H‹œ$Hƒû„&H‹C H‹S(H9È…H‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €û„ËH‹œ$˜Hƒû„²H‹K0H‹s8H‹œ$Hƒû„‘H‹C0H‹S8H9È…pH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €û„6H‹œ$H‰$Hƒ<$„Hƒ$@H‹œ$˜H‰\$Hƒ|$„éHƒD$@HÇD$èH‹œ$H‰$Hƒ<$„²Hƒ$@H‹œ$˜H‰\$Hƒ|$„ˆHƒD$@HÇD$èH‹”$H‹Œ$˜¶\$€ûuÆ„$¨HĈÃH‹ZHH‹iHH9ëtÆ„$¨HĈöZP¶iP@8ëtÆ„$¨HĈÃÆ„$¨HĈÉ%élÿÿÿ‰%éBÿÿÿ‰%é ÿÿÿ‰%éáþÿÿÆ„$¨HĈÉéhþÿÿ‰éGþÿÿÆ„$¨HĈÉéÓýÿÿ‰é²ýÿÿÆ„$¨HĈÉé>ýÿÿ‰éýÿÿÆ„$¨HĈÉé§üÿÿ‰é‡üÿÿ 
+*0runtime.morestack_noctxt runtime.eqstringþruntime.ifaceeqìruntime.ifaceeqÚruntime.ifaceeqˆ  runtime.memequalš + runtime.memequal@"".autotmp_1783type.io.Writer"".autotmp_1782type.io.Writer"".autotmp_1781¿type.io.Writer"".autotmp_1780Ÿtype.io.Writer"".autotmp_1779type.io.Reader"".autotmp_1778_type.io.Reader"".autotmp_1777?type.string"".autotmp_1776type.string "".~r30type.bool"".s type.uintptr"".qBtype.*"".AttachToContainerOptions"".pBtype.*"".AttachToContainerOptionsn"˜?°°‡©Tgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·af3107c17ee1ab6f9f33230b5c7e3062è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ"".(*Port).Port œeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt)H,$H‰ïH¥H¥èH‹L$H‹D$H‰L$@H‰D$HHƒÄ0ÉëÓ + 0runtime.morestack_noctxt¢$go.string."docker"Ì go.string."Port"ø go.string."Port" "runtime.panicwrapÚ"".Port.Port0` "".~r0type.string""..thistype.*"".Port`¯_`Ð Ð ATgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ "".(*Port).Proto œeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt)H,$H‰ïH¥H¥èH‹L$H‹D$H‰L$@H‰D$HHƒÄ0ÉëÓ + 0runtime.morestack_noctxt¢$go.string."docker"Ì go.string."Port"ø"go.string."Proto" "runtime.panicwrapÚ"".Port.Proto0` "".~r0type.string""..thistype.*"".Port`¯_`Ð"Ð ATgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ*type..hash.[8]"".PortàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_1788type.int"".autotmp_1787type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p type.*[8]"".Port`ˆ_` °° 
+}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ&type..eq.[8]"".PortàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_1792?type."".Port"".autotmp_1791type."".Port"".autotmp_1790_type.int"".autotmp_1789Otype.int "".~r30type.bool"".s type.uintptr"".q type.*[8]"".Port"".p type.*[8]"".Port&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..hash."".CommitContainerOptionsÀºeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„cHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„&Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„äHƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$0HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$@HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$PHÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ‰%éÎþÿÿ‰%é‘þÿÿ + 0runtime.morestack_noctxt„runtime.strhashðruntime.strhashÜruntime.strhashÈruntime.strhash¬runtime.strhashruntime.memhash@@"".autotmp_1798type.uintptr"".autotmp_1797type.uintptr"".autotmp_1796type.uintptr"".autotmp_1795type.uintptr"".autotmp_1794type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p>type.*"".CommitContainerOptions@À?@E   AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..eq."".CommitContainerOptionsà  eH‹ %H;awèëêHƒìHH‹\$PHƒû„±H‹3H‹KH‹\$XHƒû„”H‹H‹CH9Á…zH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„@H‹\$PHƒû„*H‹SH‹CH‹\$XHƒû„ H‹sH‹KH9È…ñH‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €û„·H‹\$PHƒû„¡H‹s H‹K(H‹\$XHƒû„ƒH‹S 
H‹C(H9Á…hH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„.H‹\$PHƒû„H‹S0H‹C8H‹\$XHƒû„úH‹s0H‹K8H9È…ßH‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €û„¥H‹\$PHƒû„H‹s@H‹KHH‹\$XHƒûtxH‹S@H‹CHH9ÁuaH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût+H‹l$PH‹]PL‹D$XI‹hPH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉ넉éjÿÿÿÆD$hHƒÄHÉéÿþÿÿ‰éáþÿÿÆD$hHƒÄHÉévþÿÿ‰éXþÿÿÆD$hHƒÄHÉéíýÿÿ‰éÏýÿÿÆD$hHƒÄHÉéeýÿÿ‰éHýÿÿ + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring² runtime.eqstring” runtime.eqstringæ runtime.eqstring@"".autotmp_1808type.string"".autotmp_1807type.string"".autotmp_1806type.string"".autotmp_1805type.string"".autotmp_1804type.string"".autotmp_1803type.string"".autotmp_1802type.string"".autotmp_1801type.string"".autotmp_1800?type.string"".autotmp_1799type.string "".~r30type.bool"".s type.uintptr"".q>type.*"".CommitContainerOptions"".p>type.*"".CommitContainerOptionsbÇ  ðð vúTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash."".ChangeÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.memhash@@ +"".autotmp_1810type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*"".Change@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq."".Change€úeH‹ %H;awèëêHƒìHH‹\$PHƒû„H‹3H‹KH‹\$XHƒûtwH‹H‹CH9ÁuaH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût+H‹l$PH‹]L‹D$XI‹hH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉë…‰élÿÿÿ + 0runtime.morestack_noctxtÞ runtime.eqstring@ "".autotmp_1812?type.string"".autotmp_1811type.string "".~r30type.bool"".s type.uintptr"".qtype.*"".Change"".ptype.*"".Change2ƒ  ÀÀ 
+nRTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþLtype..hash."".CopyFromContainerOptionsÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$ HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„"runtime.interhashèruntime.strhashÌruntime.strhash@@ "".autotmp_1815type.uintptr"".autotmp_1814type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".pBtype.*"".CopyFromContainerOptions@ž?@'àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..eq."".CopyFromContainerOptions€æeH‹ %H;awèëêHƒìhH‹\$xHƒû„ƒH‹ H‹sH‹\$pHƒû„fH‹H‹SH9È…IH‰D$HH‰$H‰T$PH‰T$H‰L$XH‰L$H‰t$`H‰t$è¶\$ €û„H‹\$pHƒû„ùH‹sH‹KH‹\$xHƒû„ÛH‹SH‹CH9Á…½H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„ƒH‹\$pHƒûttH‹S H‹C(H‹\$xHƒût]H‹s H‹K(H9ÈuCH‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût Æ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ량ëˆÆ„$ˆHƒÄhÉéÿÿÿ‰éÿÿÿÆ„$ˆHƒÄhÉé“þÿÿ‰évþÿÿ + + 0runtime.morestack_noctxtîruntime.ifaceeqÐ runtime.eqstringš runtime.eqstring@Ð"".autotmp_1821type.string"".autotmp_1820type.string"".autotmp_1819type.string"".autotmp_1818_type.string"".autotmp_1817?type.io.Writer"".autotmp_1816type.io.Writer "".~r30type.bool"".s type.uintptr"".qBtype.*"".CopyFromContainerOptions"".pBtype.*"".CopyFromContainerOptions>ÐÍÏÐ ÏÐÏÐÏÐÀÀ vÊTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·e13351f28add7c60853cb3aac0a0e34eè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash."".KeyValuePairÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.strhash@@ +"".autotmp_1823type.uintptr 
"".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*"".KeyValuePair@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq."".KeyValuePairÀ´eH‹ %H;awèëêHƒìHH‹\$PHƒû„êH‹3H‹KH‹\$XHƒû„ÍH‹H‹CH9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹SH‹CH‹\$XHƒûtWH‹sH‹KH9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé,ÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring° runtime.eqstring@"".autotmp_1827type.string"".autotmp_1826type.string"".autotmp_1825?type.string"".autotmp_1824type.string "".~r30type.bool"".s type.uintptr"".q*type.*"".KeyValuePair"".p*type.*"".KeyValuePair2Õ    vªTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ2type..hash."".PortBindingÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.strhash@@ +"".autotmp_1829type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p(type.*"".PortBinding@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..eq."".PortBindingÀ´eH‹ %H;awèëêHƒìHH‹\$PHƒû„êH‹3H‹KH‹\$XHƒû„ÍH‹H‹CH9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹SH‹CH‹\$XHƒûtWH‹sH‹KH9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé,ÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring° runtime.eqstring@"".autotmp_1833type.string"".autotmp_1832type.string"".autotmp_1831?type.string"".autotmp_1830type.string "".~r30type.bool"".s 
type.uintptr"".q(type.*"".PortBinding"".p(type.*"".PortBinding2Õ    vªTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ6type..hash."".RestartPolicyÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.memhash@@ +"".autotmp_1835type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p,type.*"".RestartPolicy@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ2type..eq."".RestartPolicy€úeH‹ %H;awèëêHƒìHH‹\$PHƒû„H‹3H‹KH‹\$XHƒûtwH‹H‹CH9ÁuaH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût+H‹l$PH‹]L‹D$XI‹hH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉë…‰élÿÿÿ + 0runtime.morestack_noctxtÞ runtime.eqstring@ "".autotmp_1837?type.string"".autotmp_1836type.string "".~r30type.bool"".s type.uintptr"".q,type.*"".RestartPolicy"".p,type.*"".RestartPolicy2ƒ  ÀÀ +nRTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash."".DeviceÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$ HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„runtime.strhashèruntime.strhashÌruntime.strhash@@ "".autotmp_1840type.uintptr"".autotmp_1839type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*"".Device@ž?@'àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq."".DeviceàÆeH‹ 
%H;awèëêHƒìHH‹\$PHƒû„sH‹3H‹KH‹\$XHƒû„VH‹H‹CH9Á…<H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„H‹\$PHƒû„ìH‹SH‹CH‹\$XHƒû„ÎH‹sH‹KH9È…³H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût}H‹\$PHƒûtnH‹s H‹K(H‹\$XHƒûtWH‹S H‹C(H9Áu@H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé+ÿÿÿ‰é ÿÿÿÆD$hHƒÄHÉé£þÿÿ‰é†þÿÿ + + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring’ runtime.eqstring@"".autotmp_1846type.string"".autotmp_1845type.string"".autotmp_1844type.string"".autotmp_1843type.string"".autotmp_1842?type.string"".autotmp_1841type.string "".~r30type.bool"".s type.uintptr"".qtype.*"".Device"".ptype.*"".Device>Æ °° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash."".ULimitÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.memhash@@ +"".autotmp_1848type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*"".ULimit@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq."".ULimitÀ¶eH‹ %H;awèëêHƒìHH‹\$PHƒû„«H‹3H‹KH‹\$XHƒû„ŽH‹H‹CH9ÁuxH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹L$PH‹D$X¶\$ €ût8H‹YH‹hH9ët +ÆD$hHƒÄHÃH‹YH‹hH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉékÿÿÿ‰éNÿÿÿ + 0runtime.morestack_noctxtæ runtime.eqstring@ "".autotmp_1850?type.string"".autotmp_1849type.string "".~r30type.bool"".s type.uintptr"".qtype.*"".ULimit"".ptype.*"".ULimit>‡  àà +rnTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..hash."".CreateContainerOptionsÀªeH‹ %H;awèëêHƒì 
H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.memhash@@ +"".autotmp_1852type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p>type.*"".CreateContainerOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..eq."".CreateContainerOptionsÀ¶eH‹ %H;awèëêHƒìHH‹\$PHƒû„«H‹3H‹KH‹\$XHƒû„ŽH‹H‹CH9ÁuxH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹L$PH‹D$X¶\$ €ût8H‹YH‹hH9ët +ÆD$hHƒÄHÃH‹YH‹hH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉékÿÿÿ‰éNÿÿÿ + 0runtime.morestack_noctxtæ runtime.eqstring@ "".autotmp_1854?type.string"".autotmp_1853type.string "".~r30type.bool"".s type.uintptr"".q>type.*"".CreateContainerOptions"".p>type.*"".CreateContainerOptions>‡  àà +rnTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ&type..hash."".StateÀºeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„cHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„&Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„äHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$(HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$@HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ‰%éÎþÿÿ‰%é‘þÿÿ + 0runtime.morestack_noctxt„runtime.memhashðruntime.memhashÜruntime.memhashÈruntime.strhash¬(type..hash.time.Time(type..hash.time.Time@@"".autotmp_1860type.uintptr"".autotmp_1859type.uintptr"".autotmp_1858type.uintptr"".autotmp_1857type.uintptr"".autotmp_1856type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*"".State@À?@E   AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ"type..eq."".State ’eH‹ 
%H;awèëêHƒìHH‹\$PH‰$Hƒ<$„ÏH‹\$XH‰\$Hƒ|$„­HÇD$èH‹\$PH‰$Hƒ<$„H‹\$XH‰\$Hƒ|$„]HÇD$èH‹L$PH‹T$X¶\$€ûu +ÆD$hHƒÄHÃH‹YH‹jH9ët +ÆD$hHƒÄHÃH‹YH‹jH9ët +ÆD$hHƒÄHÃH‹qH‹I H‰ÓH‹RH‹C H9Á…ÝH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹t$PH‹T$X¶\$ €û„™H‰ñHƒþ„…HƒÁ(H‰ÐHƒúttHƒÀ(H‹H‹(H9ëu[‹Y‹h9ëuQH‹YH‹hH9ëuDH‰ðHƒÀ@H‰ÑHƒÁ@H‹H‹)H9ëu!‹X‹i9ëuH‹XH‹iH9ëu +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉ눉étÿÿÿÆD$hHƒÄHÉ%é—þÿÿ‰%éuþÿÿ‰%éGþÿÿ‰%é%þÿÿ + + 0runtime.morestack_noctxtœ$runtime.memequal32Œ$runtime.memequal32´ runtime.eqstring@"".autotmp_1867type.*time.Time"".autotmp_1866type.*time.Time"".autotmp_1863?type.string"".autotmp_1862type.string "".~r30type.bool"".s type.uintptr"".qtype.*"".State"".ptype.*"".StatedÙ  7 MÃTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ*type..hash."".APIPortÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$ HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„runtime.memhashèruntime.strhashÌruntime.strhash@@ "".autotmp_1870type.uintptr"".autotmp_1869type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p type.*"".APIPort@ž?@'àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ&type..eq."".APIPortàÌeH‹ %H;awèëêHƒìHH‹L$PH‹D$XH‹H‹(H9ët +ÆD$hHƒÄHÃH‹YH‹hH9ët +ÆD$hHƒÄHÃH‹qH‹IH‹PH‹@H9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹S H‹C(H‹\$XHƒûtWH‹s H‹K(H9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHà + 0runtime.morestack_noctxt¢ runtime.eqstringä runtime.eqstring@"".autotmp_1874type.string"".autotmp_1873type.string"".autotmp_1872?type.string"".autotmp_1871type.string "".~r30type.bool"".s type.uintptr"".q type.*"".APIPort"".p type.*"".APIPortD¹  °° 
Tgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash.[8]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_1877type.int"".autotmp_1876type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[8]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq.[8]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_1881?"type.interface {}"".autotmp_1880"type.interface {}"".autotmp_1879_type.int"".autotmp_1878Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[8]interface {}"".p*type.*[8]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ,type..hash."".EndpointÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$ HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„runtime.strhashèruntime.strhashÌruntime.strhash@@ "".autotmp_1884type.uintptr"".autotmp_1883type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p"type.*"".Endpoint@ž?@'àà 
AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..eq."".EndpointàÆeH‹ %H;awèëêHƒìHH‹\$PHƒû„sH‹3H‹KH‹\$XHƒû„VH‹H‹CH9Á…<H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„H‹\$PHƒû„ìH‹SH‹CH‹\$XHƒû„ÎH‹sH‹KH9È…³H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût}H‹\$PHƒûtnH‹s H‹K(H‹\$XHƒûtWH‹S H‹C(H9Áu@H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé+ÿÿÿ‰é ÿÿÿÆD$hHƒÄHÉé£þÿÿ‰é†þÿÿ + + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring’ runtime.eqstring@"".autotmp_1890type.string"".autotmp_1889type.string"".autotmp_1888type.string"".autotmp_1887type.string"".autotmp_1886?type.string"".autotmp_1885type.string "".~r30type.bool"".s type.uintptr"".q"type.*"".Endpoint"".p"type.*"".Endpoint>Æ °° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..hash."".ExportContainerOptionsÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashà"runtime.interhash@@ +"".autotmp_1892type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p>type.*"".ExportContainerOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..eq."".ExportContainerOptionsàÎeH‹ %H;awèëêHƒìhH‹\$pHƒû„÷H‹3H‹KH‹\$xHƒû„ÚH‹H‹CH9Á…½H‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„ƒH‹\$xHƒûttH‹KH‹sH‹\$pHƒût]H‹CH‹SH9ÈuCH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €ût Æ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ량ëˆÆ„$ˆHƒÄhÉéÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî 
runtime.eqstring¸runtime.ifaceeq@Ð"".autotmp_1896type.io.Writer"".autotmp_1895_type.io.Writer"".autotmp_1894?type.string"".autotmp_1893type.string "".~r30type.bool"".s type.uintptr"".q>type.*"".ExportContainerOptions"".p>type.*"".ExportContainerOptions2ÐÜÏÐ ÏÐÏа° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·e13351f28add7c60853cb3aac0a0e34eè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ@type..hash."".ExportImageOptionsÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashà"runtime.interhash@@ +"".autotmp_1898type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p6type.*"".ExportImageOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþH‹¬$¶]PL‹„$˜A¶hP@8ëtÆ„$¨HĈÃÆ„$¨HĈÃÆ„$¨HĈÉéhÿÿÿ‰éGÿÿÿÆ„$¨HĈÉéÓþÿÿ‰é²þÿÿÆ„$¨HĈÉé;þÿÿ‰éþÿÿÆ„$¨HĈÉé£ýÿÿ‰é‚ýÿÿÆ„$¨HĈÉé ýÿÿ‰éìüÿÿ +*0runtime.morestack_noctxt runtime.eqstring„ runtime.eqstringø runtime.eqstringæruntime.ifaceeqÌ runtime.ifaceeq@"".autotmp_1918¿type.io.Writer"".autotmp_1917Ÿtype.io.Writer"".autotmp_1916type.io.Reader"".autotmp_1915_type.io.Reader"".autotmp_1914type.string"".autotmp_1913type.string"".autotmp_1912type.string"".autotmp_1911type.string"".autotmp_1910?type.string"".autotmp_1909type.string "".~r30type.bool"".s type.uintptr"".q6type.*"".ImportImageOptions"".p6type.*"".ImportImageOptionsb"ÿÐЇÉTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·af3107c17ee1ab6f9f33230b5c7e3062è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..hash."".KillContainerOptionsÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 
0runtime.morestack_noctxt|runtime.strhashàruntime.memhash@@ +"".autotmp_1920type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p:type.*"".KillContainerOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ@type..eq."".KillContainerOptions€úeH‹ %H;awèëêHƒìHH‹\$PHƒû„H‹3H‹KH‹\$XHƒûtwH‹H‹CH9ÁuaH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût+H‹l$PH‹]L‹D$XI‹hH9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉë…‰élÿÿÿ + 0runtime.morestack_noctxtÞ runtime.eqstring@ "".autotmp_1922?type.string"".autotmp_1921type.string "".~r30type.bool"".s type.uintptr"".q:type.*"".KillContainerOptions"".p:type.*"".KillContainerOptions2ƒ  ÀÀ +nRTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ2type..hash."".LogsOptionsàÂeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„çHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„ªHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„hHƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„&Hƒ$0HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„äHƒ$8HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$@HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HHÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$XHÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ‰%éÎþÿÿ‰%éŒþÿÿ‰%éJþÿÿ‰%é þÿÿ + 0runtime.morestack_noctxt„runtime.strhashð"runtime.interhashÜ"runtime.interhashÈruntime.memhash´runtime.memhash runtime.memhash„runtime.strhashèruntime.memhash@@"".autotmp_1930type.uintptr"".autotmp_1929type.uintptr"".autotmp_1928type.uintptr"".autotmp_1927type.uintptr"".autotmp_1926type.uintptr"".autotmp_1925type.uintptr"".autotmp_1924type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p(type.*"".LogsOptions@¬?@i°° 
AïTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..eq."".LogsOptionsÀ ¨ eH‹ %H;awèëêHƒìhH‹\$pHƒû„$H‹3H‹KH‹\$xHƒû„H‹H‹CH9Á…êH‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„°H‹\$xHƒû„šH‹KH‹sH‹\$pHƒû„|H‹CH‹SH9È…^H‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €û„$H‹\$xHƒû„H‹K H‹s(H‹\$pHƒû„ðH‹C H‹S(H9È…ÒH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €û„˜H‹\$pH‰$Hƒ<$„xHƒ$0H‹\$xH‰\$Hƒ|$„QHƒD$0HÇD$èH‹\$pH‰$Hƒ<$„Hƒ$0H‹\$xH‰\$Hƒ|$„öHƒD$0HÇD$èH‹T$pH‹L$x¶\$€ûu Æ„$ˆHƒÄhÃH‹Z8H‹i8H9ët Æ„$ˆHƒÄhöZ@¶i@@8ët Æ„$ˆHƒÄhÃH‰ÓH‹RHH‹CPH‹qHH‹IPH9ÈuhH‰T$HH‰$H‰D$PH‰D$H‰t$XH‰t$H‰L$`H‰L$è¶\$ €ût2H‹l$p¶]XL‹D$xA¶hX@8ët Æ„$ˆHƒÄhÃÆ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ%éþþÿÿ‰%é×þÿÿ‰%é£þÿÿ‰%é|þÿÿÆ„$ˆHƒÄhÉé þÿÿ‰éëýÿÿÆ„$ˆHƒÄhÉé}ýÿÿ‰é_ýÿÿÆ„$ˆHƒÄhÉéòüÿÿ‰éÕüÿÿ + 0runtime.morestack_noctxtî runtime.eqstringÐruntime.ifaceeq²runtime.ifaceeqÔ runtime.memequalÚ runtime.memequalŒ + runtime.eqstring@Ð"".autotmp_1939type.string"".autotmp_1938type.string"".autotmp_1937type.io.Writer"".autotmp_1936type.io.Writer"".autotmp_1935type.io.Writer"".autotmp_1934_type.io.Writer"".autotmp_1933?type.string"".autotmp_1932type.string "".~r30type.bool"".s type.uintptr"".q(type.*"".LogsOptions"".p(type.*"".LogsOptionszÐ÷ÏÐÏÐÏÐrÏÐ ÏÐ ÏÐ<ÏÐÏÐÏÐàà vêTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·e13351f28add7c60853cb3aac0a0e34eè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþtype.*"".RemoveContainerOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..eq."".RemoveContainerOptionsÀ¶eH‹ %H;awèëêHƒìHH‹\$PHƒû„«H‹3H‹KH‹\$XHƒû„ŽH‹H‹CH9ÁuxH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹L$PH‹D$X¶\$ €ût8¶Y¶h@8ët +ÆD$hHƒÄHöY¶h@8ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉékÿÿÿ‰éNÿÿÿ + 
0runtime.morestack_noctxtæ runtime.eqstring@ "".autotmp_1969?type.string"".autotmp_1968type.string "".~r30type.bool"".s type.uintptr"".q>type.*"".RemoveContainerOptions"".p>type.*"".RemoveContainerOptions>‡  àà +rnTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþHtype..hash."".RenameContainerOptionsÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashàruntime.strhash@@ +"".autotmp_1971type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p>type.*"".RenameContainerOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþDtype..eq."".RenameContainerOptionsÀ´eH‹ %H;awèëêHƒìHH‹\$PHƒû„êH‹3H‹KH‹\$XHƒû„ÍH‹H‹CH9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹SH‹CH‹\$XHƒûtWH‹sH‹KH9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé,ÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring° runtime.eqstring@"".autotmp_1975type.string"".autotmp_1974type.string"".autotmp_1973?type.string"".autotmp_1972type.string "".~r30type.bool"".s type.uintptr"".q>type.*"".RenameContainerOptions"".p>type.*"".RenameContainerOptions2Õ    vªTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ8type..hash."".APIImageSearchÀ²eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„ßHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$(HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ + 
0runtime.morestack_noctxt„runtime.strhashðruntime.memhashÔruntime.strhash¸runtime.memhash@@"".autotmp_1979type.uintptr"".autotmp_1978type.uintptr"".autotmp_1977type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p.type.*"".APIImageSearch@Ô?@1   AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..eq."".APIImageSearchÀ²eH‹ %H;awèëêHƒìHH‹\$PHƒû„)H‹3H‹KH‹\$XHƒû„ H‹H‹CH9Á…òH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹D$PH‹L$X¶\$ €û„®¶X¶i@8ët +ÆD$hHƒÄHöX¶i@8ët +ÆD$hHƒÄHÃH‹PH‹@ H‹qH‹I H9ÈuaH‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût+H‹l$PH‹](L‹D$XI‹h(H9ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉéíþÿÿ‰éÐþÿÿ + 0runtime.morestack_noctxtî runtime.eqstringü runtime.eqstring@"".autotmp_1983type.string"".autotmp_1982type.string"".autotmp_1981?type.string"".autotmp_1980type.string "".~r30type.bool"".s type.uintptr"".q.type.*"".APIImageSearch"".p.type.*"".APIImageSearchVk   àà vêTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþõ  ÐÐ vÚTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..hash."".doOptionsÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|(runtime.nilinterhashàruntime.memhash@@ +"".autotmp_2015type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p$type.*"".doOptions@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ*type..eq."".doOptions€üeH‹ 
%H;awèëêHƒìHH‹\$XHƒû„ŽH‹ H‹sH‹\$PHƒûtxH‹H‹SH9ÈubH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €ût,H‹l$P¶]L‹D$XA¶h@8ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉ넉ékÿÿÿ + 0runtime.morestack_noctxtÞruntime.efaceeq@ "".autotmp_2017?"type.interface {}"".autotmp_2016"type.interface {} "".~r30type.bool"".s type.uintptr"".q$type.*"".doOptions"".p$type.*"".doOptions2„   ÀÀ +nRTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ6type..hash."".hijackOptionsÀ¶eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„!HÇD$ H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„äHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$0HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$@HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ‰%éÓþÿÿ + 0runtime.morestack_noctxt„runtime.memhashð"runtime.interhashÜ"runtime.interhashÀ"runtime.interhash¤(runtime.nilinterhash@@"".autotmp_2022type.uintptr"".autotmp_2021type.uintptr"".autotmp_2020type.uintptr"".autotmp_2019type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p,type.*"".hijackOptions@Š?@;àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ2type..eq."".hijackOptions  +˜ +eH‹ %HD$øH;AwèëåHìˆH‹”$H‹„$˜H‹H‹(H9ëtÆ„$¨HĈöZ¶h@8ëtÆ„$¨HĈÃH‹HH‹pH‹BH‹RH9È…ùH‰D$hH‰$H‰T$pH‰T$H‰L$xH‰L$H‰´$€H‰t$è¶\$ €û„¼H‹œ$˜Hƒû„£H‹K H‹s(H‹œ$Hƒû„‚H‹C H‹S(H9È…aH‰D$HH‰$H‰T$PH‰T$H‰L$XH‰L$H‰t$`H‰t$è¶\$ €û„'H‹œ$˜Hƒû„H‹K0H‹s8H‹œ$Hƒû„íH‹C0H‹S8H9È…ÌH‰D$HH‰$H‰T$PH‰T$H‰L$XH‰L$H‰t$`H‰t$è¶\$ €û„’H‹œ$˜Hƒût}H‹K@H‹sHH‹œ$HƒûtcH‹C@H‹SHH9ÈuFH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €ûtÆ„$¨HĈÃÆ„$¨HĈÉ뙉é|ÿÿÿÆ„$¨HĈÉé ÿÿÿ‰éëþÿÿÆ„$¨HĈÉéwþÿÿ‰éVþÿÿÆ„$¨HĈà +*0runtime.morestack_noctxtÜruntime.ifaceeqÊruntime.ifaceeq¸runtime.ifaceeqŽruntime.efaceeq@"".autotmp_2030¿"type.interface {}"".autotmp_2029Ÿ"type.interface 
{}"".autotmp_2028type.io.Writer"".autotmp_2027type.io.Writer"".autotmp_2026type.io.Writer"".autotmp_2025_type.io.Writer"".autotmp_2024?type.io.Reader"".autotmp_2023type.io.Reader "".~r30type.bool"".s type.uintptr"".q,type.*"".hijackOptions"".p,type.*"".hijackOptions\"*º­ãTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·af3107c17ee1ab6f9f33230b5c7e3062è/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash.[2]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_2033type.int"".autotmp_2032type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[2]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq.[2]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_2037?"type.interface {}"".autotmp_2036"type.interface {}"".autotmp_2035_type.int"".autotmp_2034Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[2]interface {}"".p*type.*[2]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash.[1]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_2040type.int"".autotmp_2039type.int "".~r30type.uintptr"".h 
type.uintptr"".stype.uintptr"".p*type.*[1]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq.[1]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_2044?"type.interface {}"".autotmp_2043"type.interface {}"".autotmp_2042_type.int"".autotmp_2041Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[1]interface {}"".p*type.*[1]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..hash."".dockerEnvÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„runtime.strhashèruntime.memhashÌruntime.strhash@@ "".autotmp_2047type.uintptr"".autotmp_2046type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p$type.*"".dockerEnv@ž?@'àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ*type..eq."".dockerEnvÀºeH‹ %H;awèëêHƒìHH‹\$PHƒû„íH‹3H‹KH‹\$XHƒû„ÐH‹H‹CH9Á…¶H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$èH‹D$PH‹L$X¶\$ €ûtv¶X¶i@8ët +ÆD$hHƒÄHÃH‹PH‹@ H‹qH‹I H9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉé)ÿÿÿ‰é ÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstringÆ runtime.eqstring@"".autotmp_2051type.string"".autotmp_2050type.string"".autotmp_2049?type.string"".autotmp_2048type.string "".~r30type.bool"".s 
type.uintptr"".q$type.*"".dockerEnv"".p$type.*"".dockerEnv>‹T     vªTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ&type..hash."".ErrorÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.memhashàruntime.strhash@@ +"".autotmp_2053type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*"".Error@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ"type..eq."".ErrorÀ°eH‹ %H;awèëêHƒìHH‹L$PH‹D$XH‹H‹(H9ët +ÆD$hHƒÄHÃH‹qH‹IH‹PH‹@H9Áu@H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHà + 0runtime.morestack_noctxtì runtime.eqstring@ "".autotmp_2055?type.string"".autotmp_2054type.string "".~r30type.bool"".s type.uintptr"".qtype.*"".Error"".ptype.*"".Error*T     +u+Tgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ2type..hash."".jsonMessageÀ²eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„ßHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$„¢Hƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$ HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$0HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éRÿÿÿ‰%éÿÿÿ + 0runtime.morestack_noctxt„runtime.strhashðruntime.strhashÔruntime.strhash¸runtime.strhash@@"".autotmp_2059type.uintptr"".autotmp_2058type.uintptr"".autotmp_2057type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p(type.*"".jsonMessage@Ô?@1   
AßTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ.type..eq."".jsonMessageàØeH‹ %H;awèëêHƒìHH‹\$PHƒû„üH‹3H‹KH‹\$XHƒû„ßH‹H‹CH9Á…ÅH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €û„‹H‹\$PHƒû„uH‹SH‹CH‹\$XHƒû„WH‹sH‹KH9È…<H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €û„H‹\$PHƒû„ìH‹s H‹K(H‹\$XHƒû„ÎH‹S H‹C(H9Á…³H‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ €ût}H‹\$PHƒûtnH‹S0H‹C8H‹\$XHƒûtWH‹s0H‹K8H9Èu@H‰T$(H‰$H‰D$0H‰D$H‰t$8H‰t$H‰L$@H‰L$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHÉ륉ëŽÆD$hHƒÄHÉé+ÿÿÿ‰é ÿÿÿÆD$hHƒÄHÉé¢þÿÿ‰é„þÿÿÆD$hHƒÄHÉéþÿÿ‰éýýÿÿ + 0runtime.morestack_noctxtî runtime.eqstringÐ runtime.eqstring² runtime.eqstringô runtime.eqstring@"".autotmp_2067type.string"".autotmp_2066type.string"".autotmp_2065type.string"".autotmp_2064type.string"".autotmp_2063type.string"".autotmp_2062type.string"".autotmp_2061?type.string"".autotmp_2060type.string "".~r30type.bool"".s type.uintptr"".q(type.*"".jsonMessage"".p(type.*"".jsonMessageJ· °° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ4type..hash.[3]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_2070type.int"".autotmp_2069type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[3]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq.[3]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût 
H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_2074?"type.interface {}"".autotmp_2073"type.interface {}"".autotmp_2072_type.int"".autotmp_2071Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[3]interface {}"".p*type.*[3]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ:type..hash."".NoSuchContainerÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.strhashà"runtime.interhash@@ +"".autotmp_2076type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p0type.*"".NoSuchContainer@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ6type..eq."".NoSuchContaineràÎeH‹ %H;awèëêHƒìhH‹\$pHƒû„÷H‹3H‹KH‹\$xHƒû„ÚH‹H‹CH9Á…½H‰t$XH‰4$H‰L$`H‰L$H‰T$HH‰T$H‰D$PH‰D$è¶\$ €û„ƒH‹\$xHƒûttH‹KH‹sH‹\$pHƒût]H‹CH‹SH9ÈuCH‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €ût Æ„$ˆHƒÄhÃÆ„$ˆHƒÄhÉ량ëˆÆ„$ˆHƒÄhÉéÿÿÿ‰éÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring¸runtime.ifaceeq@Ð"".autotmp_2080type.error"".autotmp_2079_type.error"".autotmp_2078?type.string"".autotmp_2077type.string "".~r30type.bool"".s type.uintptr"".q0type.*"".NoSuchContainer"".p0type.*"".NoSuchContainer2ÐÜÏÐ ÏÐÏа° vºTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·e13351f28add7c60853cb3aac0a0e34eè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ(type..hash.[1]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_2083type.int"".autotmp_2082type.int 
"".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[1]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ$type..eq.[1]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_2087?type.string"".autotmp_2086type.string"".autotmp_2085_type.int"".autotmp_2084Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[1]string"".ptype.*[1]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ\go.interface { CloseWrite() error }.CloseWriteàÞeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$0HÇD$8H‹\$(H‰$H‹\$ H‹[ ÿÓH‹L$H‹D$H‰L$0H‰D$8HƒÄà + 0runtime.morestack_noctxt¨ +@0 "".~r0 type.error""..thisJtype.interface { CloseWrite() error }0T/p$p +TTgclocals·78fd77a07ab543a063c3a3049973febeTgclocals·3280bececceccd33cb74587feedb1f9fþ4type..hash."".tlsClientConÀªeH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$tbHÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë• + 0runtime.morestack_noctxt|runtime.memhashà"runtime.interhash@@ +"".autotmp_2090type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*"".tlsClientCon@h?@   +=cTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0type..eq."".tlsClientConÀ°eH‹ %H;awèëêHƒìHH‹T$PH‹D$XH‹H‹(H9ët +ÆD$hHƒÄHÃH‹HH‹pH‹BH‹RH9Èu@H‰D$(H‰$H‰T$0H‰T$H‰L$8H‰L$H‰t$@H‰t$è¶\$ €ût +ÆD$hHƒÄHÃÆD$hHƒÄHà + 0runtime.morestack_noctxtìruntime.ifaceeq@ 
"".autotmp_2092?type.net.Conn"".autotmp_2091type.net.Conn "".~r30type.bool"".s type.uintptr"".q*type.*"".tlsClientCon"".p*type.*"".tlsClientCon*T     +u+Tgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bè/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.goþ0"".(*tlsClientCon).Close`HHÇD$HÇD$H‹\$H‹+H‰l$é@0crypto/tls.(*Conn).Close0 "".~r1type.error""..this*type.*"".tlsClientCon00&0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ*"".tlsClientCon.CloseàÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜0crypto/tls.(*Conn).CloseP0 "".~r10type.error""..this(type."".tlsClientCon0N/p(p +K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþD"".(*tlsClientCon).ConnectionState@type.crypto/tls.ConnectionState""..this*type.*"".tlsClientCon  * Tgclocals·40de35fb9b773b345d1ee7cba691ea13Tgclocals·3280bececceccd33cb74587feedb1f9fþ>"".tlsClientCon.ConnectionStateÀ¬eH‹ %HD$ˆH;AwèëåHìøH‹Y H…ÛtH¼$H9;uH‰#H¼$1ÀèH‹œ$H‰$èH\$H¬$€H‰ïH‰ÞèHœ$€H¬$H‰ïH‰ÞèHÄøà +*0runtime.morestack_noctxtŒÄ runtime.duffzero®Dcrypto/tls.(*Conn).ConnectionStateÞÜ runtime.duffcopy”Ü runtime.duffcopy ð"".autotmp_2094ï>type.crypto/tls.ConnectionState "".~r10>type.crypto/tls.ConnectionState""..this(type."".tlsClientCon"ðsï  ,  +VJTgclocals·25609300e15c97db07af80faee4d2fd6Tgclocals·b0f264e78fa38c77ad79fe8a353279f7þ8"".(*tlsClientCon).Handshake`HHÇD$HÇD$H‹\$H‹+H‰l$é@8crypto/tls.(*Conn).Handshake0 "".~r1type.error""..this*type.*"".tlsClientCon00.0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ2"".tlsClientCon.HandshakeàÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜8crypto/tls.(*Conn).HandshakeP0 "".~r10type.error""..this(type."".tlsClientCon0N/p0p 
+K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþ8"".(*tlsClientCon).LocalAddr`HHÇD$HÇD$H‹\$H‹+H‰l$é@8crypto/tls.(*Conn).LocalAddr0 "".~r1type.net.Addr""..this*type.*"".tlsClientCon0020Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ2"".tlsClientCon.LocalAddràÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜8crypto/tls.(*Conn).LocalAddrP0 "".~r10type.net.Addr""..this(type."".tlsClientCon0N/p4p +K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþ>"".(*tlsClientCon).OCSPResponse`ZHÇD$HÇD$HÇD$ H‹\$H‹+H‰l$éR>crypto/tls.(*Conn).OCSPResponse@ "".~r1type.[]uint8""..this*type.*"".tlsClientCon0060Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ8"".tlsClientCon.OCSPResponse€øeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#HÇD$@HÇD$HHÇD$PH‹\$(H‰$èH‹T$H‹L$H‹D$H‰T$@H‰L$HH‰D$PHƒÄ à + 0runtime.morestack_noctxtª>crypto/tls.(*Conn).OCSPResponse`@ "".~r10type.[]uint8""..this(type."".tlsClientCon@a?€8€ +T,Tgclocals·da455f41cf2a78c8890074a4a256bdd4Tgclocals·3280bececceccd33cb74587feedb1f9fþ."".(*tlsClientCon).Read`ZHÇD$0HÇD$8HÇD$(H‹\$H‹+H‰l$éR.crypto/tls.(*Conn).Readp"crypto/tls.err·2Ptype.errorcrypto/tls.n·1@type.intcrypto/tls.b·4type.[]uint8""..this*type.*"".tlsClientCon00:0Tgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".tlsClientCon.ReadÀ®eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#HÇD$xHÇ„$€H‹\$@H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$èH‹T$ H‹L$(H‹D$0H‰T$pH‰L$xH‰„$€HƒÄ8à + 0runtime.morestack_noctxtÚ.crypto/tls.(*Conn).Readp"crypto/tls.err·2ptype.errorcrypto/tls.n·1`type.intcrypto/tls.b·40type.[]uint8""..this(type."".tlsClientConp|o + <  +l4Tgclocals·0273bd9c87bb10f67d516fbf00fd7767Tgclocals·3280bececceccd33cb74587feedb1f9fþ:"".(*tlsClientCon).RemoteAddr`HHÇD$HÇD$H‹\$H‹+H‰l$é@:crypto/tls.(*Conn).RemoteAddr0 
"".~r1type.net.Addr""..this*type.*"".tlsClientCon00>0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ4"".tlsClientCon.RemoteAddràÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜:crypto/tls.(*Conn).RemoteAddrP0 "".~r10type.net.Addr""..this(type."".tlsClientCon0N/p@p +K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþ<"".(*tlsClientCon).SetDeadline`HHÇD$(HÇD$0H‹\$H‹+H‰l$é@þ6"".tlsClientCon.SetDeadline ŠeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$hHÇD$pH‹\$8H‰$H‹\$PH‰\$‹\$X‰\$H‹\$`H‰\$èH‹L$ H‹D$(H‰L$hH‰D$pHƒÄ0à + 0runtime.morestack_noctxtÐþD"".(*tlsClientCon).SetReadDeadline`HHÇD$(HÇD$0H‹\$H‹+H‰l$é@Dcrypto/tls.(*Conn).SetReadDeadline` "".~r2@type.errorcrypto/tls.t·3type.time.Time""..this*type.*"".tlsClientCon00F0Tgclocals·86b4418f46455e3a0eb577619691d10fTgclocals·3280bececceccd33cb74587feedb1f9fþ>"".tlsClientCon.SetReadDeadline ŠeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$hHÇD$pH‹\$8H‰$H‹\$PH‰\$‹\$X‰\$H‹\$`H‰\$èH‹L$ H‹D$(H‰L$hH‰D$pHƒÄ0à + 0runtime.morestack_noctxtÐDcrypto/tls.(*Conn).SetReadDeadline€` "".~r2`type.errorcrypto/tls.t·30type.time.Time""..this(type."".tlsClientCon`j_ H +g)Tgclocals·be9b149192cd561578dd28b30f28e84fTgclocals·3280bececceccd33cb74587feedb1f9fþF"".(*tlsClientCon).SetWriteDeadline`HHÇD$(HÇD$0H‹\$H‹+H‰l$é@Fcrypto/tls.(*Conn).SetWriteDeadline` "".~r2@type.errorcrypto/tls.t·3type.time.Time""..this*type.*"".tlsClientCon00J0Tgclocals·86b4418f46455e3a0eb577619691d10fTgclocals·3280bececceccd33cb74587feedb1f9fþ@"".tlsClientCon.SetWriteDeadline ŠeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$hHÇD$pH‹\$8H‰$H‹\$PH‰\$‹\$X‰\$H‹\$`H‰\$èH‹L$ H‹D$(H‰L$hH‰D$pHƒÄ0à + 0runtime.morestack_noctxtÐFcrypto/tls.(*Conn).SetWriteDeadline€` "".~r2`type.errorcrypto/tls.t·30type.time.Time""..this(type."".tlsClientCon`j_ L +g)Tgclocals·be9b149192cd561578dd28b30f28e84fTgclocals·3280bececceccd33cb74587feedb1f9fþB"".(*tlsClientCon).VerifyHostname`HHÇD$ 
HÇD$(H‹\$H‹+H‰l$é@Bcrypto/tls.(*Conn).VerifyHostnameP "".~r20type.error$crypto/tls.host·3type.string""..this*type.*"".tlsClientCon00N0Tgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþ<"".tlsClientCon.VerifyHostname€úeH‹ %H;awèëêHƒì(H‹Y H…Ût H|$0H9;uH‰#HÇD$XHÇD$`H‹\$0H‰$H‹\$HH‰\$H‹\$PH‰\$èH‹L$H‹D$ H‰L$XH‰D$`HƒÄ(à + 0runtime.morestack_noctxtÀBcrypto/tls.(*Conn).VerifyHostnamepP "".~r2Ptype.error$crypto/tls.host·30type.string""..this(type."".tlsClientConPbO€P€ +_!Tgclocals·be4f16eacaf744756abcb34364e01385Tgclocals·3280bececceccd33cb74587feedb1f9fþ0"".(*tlsClientCon).Write`ZHÇD$(HÇD$0HÇD$8H‹\$H‹+H‰l$éR0crypto/tls.(*Conn).Writep "".~r3Ptype.error "".~r2@type.intcrypto/tls.b·4type.[]uint8""..this*type.*"".tlsClientCon00R0Tgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·3280bececceccd33cb74587feedb1f9fþ*"".tlsClientCon.WriteÀ®eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#HÇD$xHÇ„$€H‹\$@H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$èH‹T$ H‹L$(H‹D$0H‰T$pH‰L$xH‰„$€HƒÄ8à + 0runtime.morestack_noctxtÚ0crypto/tls.(*Conn).Writep "".~r3ptype.error "".~r2`type.intcrypto/tls.b·40type.[]uint8""..this(type."".tlsClientConp|o + T  +l4Tgclocals·0273bd9c87bb10f67d516fbf00fd7767Tgclocals·3280bececceccd33cb74587feedb1f9fþZ"".(*tlsClientCon).crypto/tls.clientHandshake`HHÇD$HÇD$H‹\$H‹+H‰l$é@Dcrypto/tls.(*Conn).clientHandshake0 "".~r1type.error""..this*type.*"".tlsClientCon00V0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþT"".tlsClientCon.crypto/tls.clientHandshakeàÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜Dcrypto/tls.(*Conn).clientHandshakeP0 "".~r10type.error""..this(type."".tlsClientCon0N/pXp +K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþV"".(*tlsClientCon).crypto/tls.decryptTicket@@HÇD$(ÆD$0H‹\$H‹+H‰l$é8@crypto/tls.(*Conn).decryptTicket` "".~r3Ptype.bool 
"".~r2@:type.*crypto/tls.sessionState.crypto/tls.encrypted·4type.[]uint8""..this*type.*"".tlsClientCon  Z Tgclocals·9f0d5ba6770c4a1ed4fa771547e96df1Tgclocals·3280bececceccd33cb74587feedb1f9fþP"".tlsClientCon.crypto/tls.decryptTicket€èeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$8H‰$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$èH‹L$ ¶\$(H‰L$hˆ\$pHƒÄ0à + 0runtime.morestack_noctxt°@crypto/tls.(*Conn).decryptTicket€` "".~r3ptype.bool "".~r2`:type.*crypto/tls.sessionState.crypto/tls.encrypted·40type.[]uint8""..this(type."".tlsClientCon`Y_ €\€ +W)Tgclocals·4e44481e9dee421443081e94ffaa0dd2Tgclocals·3280bececceccd33cb74587feedb1f9fþV"".(*tlsClientCon).crypto/tls.encryptTicket€~HÇD$HÇD$ HÇD$(HÇD$0HÇD$8H‹\$H‹+H‰l$év@crypto/tls.(*Conn).encryptTicketp "".~r3Ptype.error "".~r2 type.[]uint8&crypto/tls.state·4:type.*crypto/tls.sessionState""..this*type.*"".tlsClientCon@@^@Tgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·3280bececceccd33cb74587feedb1f9fþP"".tlsClientCon.crypto/tls.encryptTicket€äeH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#HÇD$`HÇD$hHÇD$pHÇD$xHÇ„$€H‹\$@H‰$H‹\$XH‰\$èH‹t$H‹l$H‹T$ H‹L$(H‹D$0H‰t$`H‰l$hH‰T$pH‰L$xH‰„$€HƒÄ8à + 0runtime.morestack_noctxtè@crypto/tls.(*Conn).encryptTicketp "".~r3ptype.error "".~r2@type.[]uint8&crypto/tls.state·40:type.*crypto/tls.sessionState""..this(type."".tlsClientConp—oÀ`À +sMTgclocals·0273bd9c87bb10f67d516fbf00fd7767Tgclocals·3280bececceccd33cb74587feedb1f9fþV"".(*tlsClientCon).crypto/tls.readHandshake€lHÇD$HÇD$HÇD$ HÇD$(H‹\$H‹+H‰l$éd@crypto/tls.(*Conn).readHandshakeP "".~r20type.error "".~r1"type.interface {}""..this*type.*"".tlsClientCon@@b@Tgclocals·5dfce38b1d248a3900c6ec750de77702Tgclocals·3280bececceccd33cb74587feedb1f9fþP"".tlsClientCon.crypto/tls.readHandshake žeH‹ %H;awèëêHƒì(H‹Y H…Ût H|$0H9;uH‰#HÇD$HHÇD$PHÇD$XHÇD$`H‹\$0H‰$èH‹l$H‹T$H‹L$H‹D$ H‰l$HH‰T$PH‰L$XH‰D$`HƒÄ(à + 0runtime.morestack_noctxt¼@crypto/tls.(*Conn).readHandshakepP "".~r2Ptype.error "".~r10"type.interface {}""..this(type."".tlsClientConPtOd 
+]3Tgclocals·d93d6c9fc85d7888b8b1832756680f45Tgclocals·3280bececceccd33cb74587feedb1f9fþP"".(*tlsClientCon).crypto/tls.readRecord`HHÇD$HÇD$ H‹\$H‹+H‰l$é@:crypto/tls.(*Conn).readRecord@ "".~r2 type.error$crypto/tls.want·34type.crypto/tls.recordType""..this*type.*"".tlsClientCon00f0Tgclocals·6a2e5ab2d393a1bfd331903fbd0fd425Tgclocals·3280bececceccd33cb74587feedb1f9fþJ"".tlsClientCon.crypto/tls.readRecord€äeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#HÇD$HHÇD$PH‹\$(H‰$¶\$@ˆ\$èH‹L$H‹D$H‰L$HH‰D$PHƒÄ à + 0runtime.morestack_noctxtª:crypto/tls.(*Conn).readRecord`@ "".~r2@type.error$crypto/tls.want·304type.crypto/tls.recordType""..this(type."".tlsClientCon@W?€h€ +T,Tgclocals·c776d40308d3cc87dab399555a94d3caTgclocals·3280bececceccd33cb74587feedb1f9fþN"".(*tlsClientCon).crypto/tls.sendAlert`HHÇD$HÇD$ H‹\$H‹+H‰l$é@8crypto/tls.(*Conn).sendAlert@ "".~r2 type.error"crypto/tls.err·3*type.crypto/tls.alert""..this*type.*"".tlsClientCon00j0Tgclocals·6a2e5ab2d393a1bfd331903fbd0fd425Tgclocals·3280bececceccd33cb74587feedb1f9fþH"".tlsClientCon.crypto/tls.sendAlert€äeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#HÇD$HHÇD$PH‹\$(H‰$¶\$@ˆ\$èH‹L$H‹D$H‰L$HH‰D$PHƒÄ à + 0runtime.morestack_noctxtª8crypto/tls.(*Conn).sendAlert`@ "".~r2@type.error"crypto/tls.err·30*type.crypto/tls.alert""..this(type."".tlsClientCon@W?€l€ +T,Tgclocals·c776d40308d3cc87dab399555a94d3caTgclocals·3280bececceccd33cb74587feedb1f9fþZ"".(*tlsClientCon).crypto/tls.sendAlertLocked`HHÇD$HÇD$ H‹\$H‹+H‰l$é@Dcrypto/tls.(*Conn).sendAlertLocked@ "".~r2 type.error"crypto/tls.err·3*type.crypto/tls.alert""..this*type.*"".tlsClientCon00n0Tgclocals·6a2e5ab2d393a1bfd331903fbd0fd425Tgclocals·3280bececceccd33cb74587feedb1f9fþT"".tlsClientCon.crypto/tls.sendAlertLocked€äeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#HÇD$HHÇD$PH‹\$(H‰$¶\$@ˆ\$èH‹L$H‹D$H‰L$HH‰D$PHƒÄ à + 0runtime.morestack_noctxtªDcrypto/tls.(*Conn).sendAlertLocked`@ "".~r2@type.error"crypto/tls.err·30*type.crypto/tls.alert""..this(type."".tlsClientCon@W?€p€ 
+T,Tgclocals·c776d40308d3cc87dab399555a94d3caTgclocals·3280bececceccd33cb74587feedb1f9fþZ"".(*tlsClientCon).crypto/tls.serverHandshake`HHÇD$HÇD$H‹\$H‹+H‰l$é@Dcrypto/tls.(*Conn).serverHandshake0 "".~r1type.error""..this*type.*"".tlsClientCon00r0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþT"".tlsClientCon.crypto/tls.serverHandshakeàÒeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#HÇD$8HÇD$@H‹\$ H‰$èH‹L$H‹D$H‰L$8H‰D$@HƒÄà + 0runtime.morestack_noctxt˜Dcrypto/tls.(*Conn).serverHandshakeP0 "".~r10type.error""..this(type."".tlsClientCon0N/ptp +K%Tgclocals·57e1009a600f832f844e0e3c49ba5a89Tgclocals·3280bececceccd33cb74587feedb1f9fþX"".(*tlsClientCon).crypto/tls.tryCipherSuite@6HÇD$8H‹\$H‹+H‰l$é.Bcrypto/tls.(*Conn).tryCipherSuitep "".~r6`8type.*crypto/tls.cipherSuite*crypto/tls.ecdsaOk·7Vtype.bool0crypto/tls.ellipticOk·6Ttype.bool*crypto/tls.version·5Ptype.uint16Fcrypto/tls.supportedCipherSuites·4 type.[]uint16 crypto/tls.id·3type.uint16""..this*type.*"".tlsClientCon  v Tgclocals·a99c50f5f5d34b1bf54d8ece6dad05c2Tgclocals·3280bececceccd33cb74587feedb1f9fþR"".tlsClientCon.crypto/tls.tryCipherSuiteÀ¬eH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹\$@H‰$H·\$Xf‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H·\$xf‰\$(¶\$zˆ\$*¶\${ˆ\$+èH‹\$0H‰œ$€HƒÄ8à + 0runtime.morestack_noctxt€Bcrypto/tls.(*Conn).tryCipherSuitep "".~r6€8type.*crypto/tls.cipherSuite*crypto/tls.ecdsaOk·7vtype.bool0crypto/tls.ellipticOk·6ttype.bool*crypto/tls.version·5ptype.uint16Fcrypto/tls.supportedCipherSuites·4@type.[]uint16 crypto/tls.id·30type.uint16""..this(type."".tlsClientConp{o  x  +!Tgclocals·adf7fd756b6e86afbfe88b4b789f56a2Tgclocals·3280bececceccd33cb74587feedb1f9fþR"".(*tlsClientCon).crypto/tls.writeRecord`ZHÇD$8HÇD$@HÇD$0H‹\$H‹+H‰l$éRþL"".tlsClientCon.crypto/tls.writeRecordàÒeH‹ %H;awèëêHƒì@H‹Y H…Ût H|$HH9;uH‰#HÇ„$ˆHÇ„$H‹\$HH‰$¶\$`ˆ\$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$ èH‹T$(H‹L$0H‹D$8H‰”$€H‰Œ$ˆH‰„$HƒÄ@à + 0runtime.morestack_noctxtòþ,4go.itab.*os.File.io.Readerþ go.string."HOME"0*HOME 
go.string."HOME"þ&go.string.".docker"00.docker &go.string.".docker"þ.go.string."config.json"@8 config.json .go.string."config.json"þ,go.string.".dockercfg"@6 +.dockercfg ,go.string.".dockercfg"þTgclocals·2e2a9972ea9ced3a58f9e7510cf4914d0€ " "€€  þTgclocals·b8a8407971613b03b21a64dc1e56fba0PPþTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·9edc1f6d8fc7336ae101b48cbf822a45 + þ"go.string."auths"0,auths "go.string."auths"þTgclocals·74b1ee12d224f81a4fda25605fab855d``$ þTgclocals·e94084972e98c8fdf7f2203a35ca807a88 + þgo.string.":"0$: go.string.":"þTgclocals·1c0f8a36a8ada2462e7c582fc8286897¸¸R €ªV‚€€ªV€€ˆˆªV€€ˆˆªVþTgclocals·f565a1229afec041643831d3cd6a3b7dHHþ.go.string."conf is nil"@8 conf is nil .go.string."conf is nil"þ go.string."POST"0*POST go.string."POST"þ"go.string."/auth"0,/auth "go.string."/auth"þ>go.string."auth error (%d): %s"PHauth error (%d): %s >go.string."auth error (%d): %s"þTgclocals·25ee8e11891a6b427c03a740dc761f96@@(€ÿÿþTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þgo.string."C"0$C go.string."C"þgo.string."A"0$A go.string."A"þgo.string."D"0$D go.string."D"þ"go.string."%s %s"0,%s %s "go.string."%s %s"þTgclocals·0b4080736ceb8b2da6f0b7e8a876e6b8(("üüþTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þgo.string."."0$. 
go.string."."þLgo.string."Unable to parse version %q"`VUnable to parse version %q Lgo.string."Unable to parse version %q"þxgo.string."Unable to parse version %q: %q is not an integer"‚0Unable to parse version %q: %q is not an integer xgo.string."Unable to parse version %q: %q is not an integer"þTgclocals·979c84cf2ee7fa703a7cd5365c579635€€>€< €< +€ˆ À?ˆÀ?þTgclocals·56fad8922133a82d7e9abffb05067a58HHþTgclocals·37f4150aca71c16b472a5e6f54a4a2bc((þTgclocals·c45f1008acf31f9ce337f7dfa1fa0204(( +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d83eab2a3f0aa562c88b153605ebed26‚þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d83eab2a3f0aa562c88b153605ebed26‚þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d83eab2a3f0aa562c88b153605ebed26‚þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d83eab2a3f0aa562c88b153605ebed26‚þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d83eab2a3f0aa562c88b153605ebed26‚þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·5dfce38b1d248a3900c6ec750de77702 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8e51ba8a606dfe7bf8ea610f35b1860a""þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a3afb5a83dcf14cc57a3d3da3be3a7df"þTgclocals·63a71a9d82a0cb5094b44aef6b6fe396PP"*(þTgclocals·677e212df4ff2dc5d1bd7207f0cb343fPP""""""""þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·c984d5bd78e9da313cca302adec9d408""þTgclocals·f27fde19da2a9a9e0264e00d44cbb36a(( þTgclocals·e11b7011fe7d18f281fa367784f98637((""""""þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0528ab8f76149a707fd2f0025c2178a3þgo.string."://"0(:// go.string."://"þhgo.string."could not split %s into two parts by ://"€r(could not split %s into two parts by :// hgo.string."could not split %s into two parts by ://"þ,go.string."https://%s"@6 +https://%s ,go.string."https://%s"þ(go.string."cert.pem"@2cert.pem (go.string."cert.pem"þ&go.string."key.pem"00key.pem &go.string."key.pem"þ$go.string."ca.pem"0.ca.pem 
$go.string."ca.pem"þTgclocals·bae70cbfa95aa7f2f402b02d37b0b239àà <ÈÈÀÀÀ€  ˆ€ ˆ ˆ€ ˆ þTgclocals·7df0f47b43308a447b1c5362b1e10571xx +þ,Bgo.itab.*errors.errorString.errorþ,bgo.itab.*net/http.Transport.net/http.RoundTripperþTgo.string."Both cert and key are required"`^Both cert and key are required Tgo.string."Both cert and key are required"þHgo.string."Could not add RootCA pem"`RCould not add RootCA pem Hgo.string."Could not add RootCA pem"þTgclocals·6eba1717ce5fb698f2f3dfc9fb10f9ab˜˜`  ¼ €  ¼ € ‚€ð‚€  +  +€  +   +  + *   +  + + + +Š +Š +þTgclocals·7d1b151141fa06142d6be12f40c65cbcèè "‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚"‚þTgclocals·44e348188e22fef6300f71ab26e45197 °þTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8f þ,.go.itab.*"".Error.errorþ$go.string."/_ping"0./_ping $go.string."/_ping"þgo.string."GET"0(GET go.string."GET"þTgclocals·de8e430848d9f174d000c8b092bf66b4((þTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þ(go.string."/version"@2/version (go.string."/version"þªgo.string."Received unexpected status %d while trying to retrieve the server version"À´IReceived unexpected status %d while trying to retrieve the server version ªgo.string."Received unexpected status %d while trying to retrieve the server version"þ,go.string."ApiVersion"@6 +ApiVersion ,go.string."ApiVersion"þTgclocals·3bb049eac63b5508d15152b6e410d69f88 < <€þTgclocals·31c26a3eb5003a6c37416d296e2bd48788 +þ,>go.itab.*bytes.Buffer.io.Readerþ,go.string."User-Agent"@6 +User-Agent ,go.string."User-Agent"þ6go.string."go-dockerclient"@@go-dockerclient 6go.string."go-dockerclient"þ0go.string."Content-Type"@: Content-Type 0go.string."Content-Type"þ8go.string."application/json"PBapplication/json 8go.string."application/json"þ,go.string."plain/text"@6 +plain/text ,go.string."plain/text"þ go.string."unix"0*unix go.string."unix"þgo.itab.*bytes.Reader.io.Readerþgo.string."PUT"0(PUT go.string."PUT"þ*go.string."%s %s\x0d"0.%s %s  *go.string."%s %s\x0d"þTgclocals·147deda5d3defe8e1d522f194155c84f˜ ˜ ž€€¨jÀˆÀÀÀÀÀ  , 
À À ü ü À À þTgclocals·7e331f181b2554581236d61d5561e53døø ŠäîŠääääääääääîŠäîŠääîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîŠäîþgo.string."tcp"0(tcp go.string."tcp"þTgclocals·65dfd25068bbba2abebc869f9ef9f7a5˜˜T€€ˆ€°°€°€° +€°€° ° €°¢€° €° ˆ° ¨° ª°€ ª°€ *° *° *° *°*°(°°°°°°þTgclocals·84e82484f467e1dc08e5640e075b9b76ÀÀ"ŠØî>ŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØîþŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>ŠØî>þgo.string."/"0$/ go.string."/"þ(go.string."%s/v%s%s"@2%s/v%s%s (go.string."%s/v%s%s"þ go.string."%s%s"0*%s%s go.string."%s%s"þTgclocals·514c3d378a44440bceb597de19ebfbf7pp." ü?"ü?" ü"üþTgclocals·61dac2719f307a892a4a15123f2e6a2d@@ + + + + + + +þgo.string."qs"0&qs go.string."qs"þgo.string."-"0$- go.string."-"þTgclocals·928ad969a3698656dfa33d91e2ca9cd1ˆˆJhhh€ÈJB +hþTgclocals·c69849cba6bf70a13b3371331d258b5088þgo.string."1"0$1 go.string."1"þTgclocals·a157d5303e3a20a2392145ef25e4599bððx  €€ €   þTgclocals·b8c550e5e1ba1f11f1bc237b9d0f0dc8¨¨ ŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠþTgclocals·e1ae6533a9e39048ba0735a2264ce16a þTgclocals·3e69739b44630d52358b28c7a0e238fa  + þgo.string."/containers/create?"PH/containers/create? 
>go.string."/containers/create?"þTgclocals·2ca41a02a2a5788f97a4be1897b36700pp*ˆ€ð€€þTgclocals·3f5a7d1842b14039f35be09ef67df5f8@@ŠŠŠŠŠŠþ$go.string."always"0.always $go.string."always"þTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·0528ab8f76149a707fd2f0025c2178a3þ,go.string."on-failure"@6 +on-failure ,go.string."on-failure"þTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·2d8f3a7439ca173dec4205ff264b0edcþgo.string."no"0&no go.string."no"þTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·0528ab8f76149a707fd2f0025c2178a3þ,Rgo.itab.*"".ContainerAlreadyRunning.errorþ$go.string."/start"0./start $go.string."/start"þTgclocals·1497b0fbec88b963d1dc5f4cf942151688  þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþ,Jgo.itab.*"".ContainerNotRunning.errorþHgo.string."/containers/%s/stop?t=%d"`R/containers/%s/stop?t=%d Hgo.string."/containers/%s/stop?t=%d"þTgclocals·5c42a9dee0c88889a167a0d13b7c2026``(ÿÿþTgclocals·42785a4ae44025160cf24924f7d01efb88 JJJJJþNgo.string."/containers/%s/restart?t=%d"`X/containers/%s/restart?t=%d Ngo.string."/containers/%s/restart?t=%d"þTgclocals·5bacfca50b7e6b4494d1c0d96e8b2c7cPP&€Â?€À?þTgclocals·1da38d5d89527cd2ab312249704d85d700 JJJJþ@go.string."/containers/%s/pause"PJ/containers/%s/pause @go.string."/containers/%s/pause"þTgclocals·556e2b84f9ef2d507be121d828e30b96PP"€< €<þTgclocals·fe0d626f6a1a9cb0d3493cb8c292091b00 + + + + +þDgo.string."/containers/%s/unpause"PN/containers/%s/unpause Dgo.string."/containers/%s/unpause"þTgclocals·556e2b84f9ef2d507be121d828e30b96PP"€< €<þTgclocals·fe0d626f6a1a9cb0d3493cb8c292091b00 + + + + +þ.go.string."?ps_args=%s"@8 ?ps_args=%s .go.string."?ps_args=%s"þ@go.string."/containers/%s/top%s"PJ/containers/%s/top%s @go.string."/containers/%s/top%s"þTgclocals·6a9f496e2cfbe0515ededb4c2d64a743:    " ü"ü +þTgclocals·950e6e6b9e7c3fe47672289f0a6f6e8bPPŠŠŠŠŠŠŠŠþ,@go.itab.*io.PipeReader.io.ReaderþTgclocals·1bc79a478470e6209b0ca20d87c54bd38 "* *"*"*"*@ª"*@ª@ª€" +@ª€ +@ª€ @ª @ª @ª  @ª @ª 
@ªÂ@ª@ª@ª@ª,@ª€@ªþTgclocals·378f3e900c220a5cad1989c9c06023bdÐЊŠŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙŠÙþ$go.string."/kill?"0./kill? $go.string."/kill?"þTgclocals·1705812f15ec71868ae696027438a358(( þTgclocals·bd92ef728a38faac78badef3588d832f(( JJJþgo.string."?"0$? go.string."?"þ$go.string."DELETE"0.DELETE $go.string."DELETE"þTgclocals·1705812f15ec71868ae696027438a358(( þTgclocals·bd92ef728a38faac78badef3588d832f(( JJJþ>go.string."/containers/%s/copy"PH/containers/%s/copy >go.string."/containers/%s/copy"þTgclocals·4e703ba17638508264f032b5f70033cd<ðð ð++þTgclocals·387212f77114c618c992aca1d7f6e2d3PP®®®®®®®®þ"go.string."/wait"0,/wait "go.string."/wait"þTgclocals·ec5d02e01ec699817d1c71b60a3fa4d088| þTgclocals·eda57d60e805297221010beefc01cf3d88 + + + + +þ(go.string."/commit?"@2/commit? (go.string."/commit?"þTgclocals·81f50624430372e4ea6dfcf6f12b35fd``6""" þTgclocals·9de5ccc45996de67be5d048aaa3bec9388ŠˆˆŠˆˆŠˆˆŠˆˆŠˆˆþ(go.string."/attach?"@2/attach? (go.string."/attach?"þTgclocals·504ff4b35adae90119a7640e996e47cf@@4€ìnþTgclocals·acacf92de43844ed4d32a3b47fdcea71((ÊîfÊîfÊîfþgo.string."all"0(all go.string."all"þ$go.string."/logs?"0./logs? $go.string."/logs?"þTgclocals·75d51badeb043219a3daddeb664f46bc@@4 »•þTgclocals·2c6bb9a575800b4fd811118aedd59a39((Ên%Ên%Ên%þgo.string."h"0$h go.string."h"þgo.string."w"0$w go.string."w"þ(go.string."/resize?"@2/resize? 
(go.string."/resize?"þTgclocals·564b0cd8045e1e3a560aecfc019285da   *((€(€ € "¢€"€ ÀþTgclocals·78ce512784b85b97418b7726f81bf730XX JJJJJJJJJþBgo.string."/containers/%s/export"PL/containers/%s/export Bgo.string."/containers/%s/export"þTgclocals·fcc516824ce26c0001e09dc1d3d75478PP&¼<þTgclocals·48afd233022498cd45f3b0138014243e00ÊÊÊÊþ>go.string."No such container: "PHNo such container:  >go.string."No such container: "þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·a08e9001cb8f9d822225de3b8e406515þNgo.string."Container already running: "`XContainer already running:  Ngo.string."Container already running: "þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þFgo.string."Container not running: "PPContainer not running:  Fgo.string."Container not running: "þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·14c45952157723c8762210d9c661bf29 + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þgo.string." \t"0&  go.string." 
\t"þgo.string."0"0$0 go.string."0"þ"go.string."false"0,false "go.string."false"þ go.string."none"0*none go.string."none"þTgclocals·23c4785fa8abd7e258acfe91c9f325f3  þTgclocals·9ff42bf311af152488d11f0f78c8d5ce  + +þgo.string."="0$= go.string."="þTgclocals·2d894b3b66dff3ff7aaa2a78013804f9€€$  €þTgclocals·f774b632f7ff7d029527413a83030842HHJJJJJJJþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·6fac742cdcfec8bff38f6662e683bbda00€þTgclocals·2cda55eacf8f3a391cf15caecdfeef0600JJJJþTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·6fac742cdcfec8bff38f6662e683bbda00€þTgclocals·2cda55eacf8f3a391cf15caecdfeef0600JJJJþTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·528c559c9193f2a671691be2686ab724ÊþTgclocals·299a4d24490b926d38628658bb77eeb1PP$€þTgclocals·bb06efbb6a26e0f286c10766fad350d700ÊÊÊÊþTgclocals·80f0398afc092a879ad303c2fec80b66@@ *(þTgclocals·f09ff24693e6d72e9e2f82319a6e45a0@@ + + + + + +þTgclocals·61e2515c69061b8fed0e66ece719f936 þTgclocals·ff7af1025fb7deae6ebf3487eab30c33 ŠŠþTgclocals·f9166171185d1f1926264897a0c959c1(( þTgclocals·46b690808f7e1a8626f300054e53774f(( +ŠŠŠþTgclocals·8f12e5afe7e149987419843938d69919``, +ªZþTgclocals·784852ecd61fa458e8af6c57e3ee02b888 +.....þgo.string."%v"0&%v go.string."%v"þTgclocals·7c09a673592d13ccf4305e509b0c4fdf°° F€€ < < € þTgclocals·f2bff8318847e30874c64d3cd9d3a459pp +ÊÊÊÊÊÊÊÊÊÊÊÊþTgclocals·f3e8856499aee240134cb47f88c6cd55  +þTgclocals·2148c3737b2bb476685a1100a2e8343e þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·551282070bdf4bca9f3b8ada2a8f2d2a(( þTgclocals·4ab27d0e7d4f80bb5765ef5f61de5fe5(( +º +þTgclocals·158185e77a15ce9170c1aa92e62cd73e00 +þTgclocals·00180cfd7eeeff04c22905d29bdac05200 +º + +þTgclocals·29f0050a5ee7c2b9348a75428171d7de þTgclocals·ac5bea9c8a91f5fb1d31bdacc5067b57 
þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d3486bc7ce1948dc22d7ad1c0be2887a +þTgclocals·0115f8d53b75c1696444f08ad03251d9þTgclocals·fa051c55663fc115869f36c85a0645b9  +ºþTgclocals·0115f8d53b75c1696444f08ad03251d9þTgclocals·a9282ac20787dc3025c0916068a42263 .þTgclocals·d963a621632aab39c7173f3f69ae3f91èè J  ‚ª°©–i™–©‚¢°©–i™–©   € þTgclocals·8e6ff68ca952ded665cfa894236f9944XX  + + + + + + + + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·0115f8d53b75c1696444f08ad03251d9þTgclocals·cf86db206769ec68369d07e260728f65 þTgclocals·0115f8d53b75c1696444f08ad03251d9þTgclocals·cf86db206769ec68369d07e260728f65 þTgclocals·770683613b64aeb90b5472e68a988b48  + +þTgclocals·9d97800b9eac7aaad25644c1094f6baa  + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e8c55b930b09fa5028b5e4b78b8932dc +þ,Bgo.itab.*crypto/tls.Conn.net.Connþ&go.string."/events"00/events &go.string."/events"þ*go.string."?since=%d"@4 ?since=%d *go.string."?since=%d"þTgclocals·c1f40b05e3ffba0283c820006999a7cf°°<(¼€(¼(<(Š¨ +¨ +,¨Šª + ¨(¨"¨¢(¢(¢ ¢þTgclocals·8c02cd934f4d00aa05beba150d4d3e04   ¦¦¦¦¦¦¦¦¦¦¦¦¦¦¦¦¦¦þ>go.string."/containers/%s/exec"PH/containers/%s/exec >go.string."/containers/%s/exec"þTgclocals·b0ce568b8ee350283c34690ddf2c68926 |@‚@‚ þTgclocals·5a21f577b603cab6ea76228d95a69547PP&ˆ&ˆ&ˆ&ˆ&ˆ&ˆ&ˆ&ˆþ,8go.itab.*"".NoSuchExec.errorþ4go.string."/exec/%s/start"@>/exec/%s/start 4go.string."/exec/%s/start"þTgclocals·49a517ef1dc6f4c5ce9b594512df2da4¸¸H€< €<À´» ´» ´»i»ûþTgclocals·1bba016e9a05211bda029a0d75dbaa68HHJ»›J»›J»›J»›J»›J»›J»›þ/images/%s/get 4go.string."/images/%s/get"þTgclocals·fabba7188ed1b3b8bac23e1a07c66457@@$/þTgclocals·740354061e4e9c9d9a50f05557f21f54((ÊÊÊþ0go.string."/images/get?"@: /images/get? 
0go.string."/images/get?"þTgclocals·d83f68254ae1224f9001a252749abef2((äîþTgclocals·0629ba7d00f7a57ad6e2352df47e7bb3(( + + + þTgclocals·9b3781349ecf8ea1253d7ba626d001b4``" ( €"þTgclocals·10971996d6a01a6d477c3318892d070f88ŠÈnŠÈnŠÈnŠÈnŠÈnþ6go.string."application/tar"@@application/tar 6go.string."application/tar"þ*go.string."/build?%s"@4 /build?%s *go.string."/build?%s"þTgclocals·f991e5818d95c260e9075daec3edcda1èè D üü + +  + +" +þTgclocals·689d5e2b826a4f8fae61c828d739d7d9   >ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢ŠTÉn""¢þ&go.string."/tag?%s"00/tag?%s &go.string."/tag?%s"þTgclocals·c6e5a101f01f70a879acdb3760944b0d((ò€òþTgclocals·23803564b4b262dab15001f621fd3b37((ŠHŠHŠHþTgclocals·2b892b6166a29da84b4f26d3316f1499  +þTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8f þ,>go.itab.*bytes.Buffer.io.Writerþ6go.string."X-Registry-Auth"@@X-Registry-Auth 6go.string."X-Registry-Auth"þ:go.string."X-Registry-Config"PDX-Registry-Config :go.string."X-Registry-Config"þTgclocals·7a03355e34b75c37acf5eff7bf674ad4ààH"ð"ð¢ð"²ð*ð""" ¢ð" ð*ð"þTgclocals·afcded8c13354e18af605d7f21ec25fe€€ þ@go.string."/images/search?term="PJ/images/search?term= @go.string."/images/search?term="þTgclocals·52cf122e9547c47353d18ce23d85402a00þTgclocals·766148fb4da5bf1af59ee4d8b91fb45400 + + + +þTgclocals·41bb44495be0a59dc118277b1d9139f988 +þTgclocals·9cf15d8275d9c299f023024ca604cf9088þ"go.string."/info"0,/info "go.string."/info"þTgclocals·41bb44495be0a59dc118277b1d9139f988 +þTgclocals·9cf15d8275d9c299f023024ca604cf9088þTgclocals·8d600a433c6aaa81a4fe446d95c5546b þTgclocals·ca1ebfc68aaed1d083688775167e5178  þ*go.string."/networks"@4 /networks *go.string."/networks"þTgclocals·ae0db13c3cc1bbe7f0a2ea62d05cf906((þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( þ,>go.itab.*"".NoSuchNetwork.errorþ,go.string."/networks/"@6 +/networks/ ,go.string."/networks/"þTgclocals·0c8fa0fcc4836d09a64d3d20b95663fe00þTgclocals·be34fa03b4e4d696adaf8f647f7704fd00 + + + +þTgclocals·35aa8cef5f531c9de8f76600ceb85b27€€"|""  
+þTgclocals·81381a8f40f0e35a38db28a8bb50de11HHŠŠŠŠŠŠŠþ>go.string."No such network: %s"PHNo such network: %s >go.string."No such network: %s"þTgclocals·403a8d79fd24b295e8557f6970497aa3((ððþTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þ2go.string.".dockerignore"@< .dockerignore 2go.string.".dockerignore"þjgo.string."cannot match .dockerfile: '%s', error: %s"€t)cannot match .dockerfile: '%s', error: %s jgo.string."cannot match .dockerfile: '%s', error: %s"þTgclocals·4df3d887804869ca0d16462c47a4175fÈÈ T ‚ (   ò  ò ò(       þTgclocals·d01647b6fcc19f6b40c264ab6c580992xx """""""""""""þTgclocals·7546955fbaa0a8a5c520077bd4d4710500 ""¤"þTgclocals·e3c75ef39e8363f5b00a257bd2be7adb00""""þZgo.string."error reading .dockerignore: '%s'"pd!error reading .dockerignore: '%s' Zgo.string."error reading .dockerignore: '%s'"þgo.string."\n"0$ + go.string."\n"þTgclocals·738657360054077c9c40a6546341a136pp0€"€" ‚€ €þTgclocals·2f519926ed4d9241bccfb3ede7c3f0ba@@þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·a08e9001cb8f9d822225de3b8e406515þ,Bgo.itab.*"".tlsClientCon.net.ConnþTgclocals·a6f85fd4ba75b8cdab35ab28e50023d9  0 + +ˆ+ˆ ( °‚( €( ( (  þTgclocals·0389232f9bf0423206204d8b27e58130˜˜ŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d85453ba2fc2b16513844b65495ea6c3"þ,>go.itab.*bufio.Reader.io.ReaderþTgclocals·51d2fd2674ba9ccfd7abd80151d2e03288¨* þTgclocals·7c13896baab3273e10662a9a37b348ceþTgclocals·2b592d649ecec7c5b5fac74b8e09bee800*"ÀþTgclocals·0372b889336bbdf612862c172920463dþTgclocals·50e42ec547586bf00be346cef54257da88* +À +þTgclocals·7c13896baab3273e10662a9a37b348ceþ,@go.itab.*io.PipeWriter.io.WriterþTgo.string."/containers/%s/stats?stream=%v"`^/containers/%s/stats?stream=%v Tgo.string."/containers/%s/stats?stream=%v"þTgclocals·df3c8560fdbead80e4ddce1ccdbd1147€€ +D  + ü  +ü¨ +üçî ü ² "²°þTgclocals·fb05dbbfacbbe47b8b1eb4226ce34430 
+þTgclocals·f34a2133376bc2e71ee31cc35164f3d3@@*jªeZ¦hªeZ¦þTgclocals·73423680ca5f2d7df4fe760a82d507fbþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·48a37d9114fa45f0336e02e754d41f88   (€¨€¨ ˆ¨‚¨‚ª€‚ª ¢¨þTgclocals·8e6ff68ca952ded665cfa894236f9944XX  + + + + + + + + +þ6go.string."can't stat '%s'"@@can't stat '%s' 6go.string."can't stat '%s'"þVgo.string."no permission to read from '%s'"``no permission to read from '%s' Vgo.string."no permission to read from '%s'"þTgclocals·8638ac1ded2e05617036c77f7600dfac``&òòþTgclocals·0e5d6e03d8b052993869281db2167ff788² ² ² ² ² þTgclocals·0730e324c95d53ccaec07bf254f1f51600 + þTgclocals·0372b889336bbdf612862c172920463dþTgclocals·f1ce4f14231620ac9cd58e5cd8e6fa2d((,þTgclocals·73423680ca5f2d7df4fe760a82d507fbþgo.string."EOF"0(EOF go.string."EOF"þpgo.string."Failed to read authentication from dockercfg"€z,Failed to read authentication from dockercfg pgo.string."Failed to read authentication from dockercfg"þ8go.string."invalid endpoint"PBinvalid endpoint 8go.string."invalid endpoint"þZgo.string."cannot connect to Docker endpoint"pd!cannot connect to Docker endpoint Zgo.string."cannot connect to Docker endpoint"þ go.string."1.12"0*1.12 go.string."1.12"þHgo.string."container already exists"`Rcontainer already exists Hgo.string."container already exists"þbgo.string."no listeners present to receive event"pl%no listeners present to receive event bgo.string."no listeners present to receive event"þjgo.string."listener already exists for docker events"€t)listener already exists for docker events jgo.string."listener already exists for docker events"þ2go.string."no such image"@< no such image 2go.string."no such image"þ‚go.string."missing remote repository e.g. 'github.com/user/repo'"Œ5missing remote repository e.g. 'github.com/user/repo' ‚go.string."missing remote repository e.g. 
'github.com/user/repo'"þBgo.string."missing output stream"PLmissing output stream Bgo.string."missing output stream"þšgo.string."image build may not be provided BOTH context dir and input stream"°¤Aimage build may not be provided BOTH context dir and input stream šgo.string."image build may not be provided BOTH context dir and input stream"þhgo.string."must specify at least one name to export"€r(must specify at least one name to export hgo.string."must specify at least one name to export"þDgo.string."network already exists"PNnetwork already exists Dgo.string."network already exists"þTgclocals·7b2d1dc8e692ba633cb2c876407e20f2 +þTgclocals·3280bececceccd33cb74587feedb1f9fþ*""".AuthParseError type.errorþ**"".ErrInvalidEndpoint type.errorþ*."".ErrConnectionRefused type.errorþ* "".apiVersion1120$type."".APIVersionþ*8"".ErrContainerAlreadyExists type.errorþ*""".ErrNoListeners type.errorþ*6"".ErrListenerAlreadyExists type.errorþ*"".EOFEvent$type.*"".APIEvents""".statictmp_1673þ*""".ErrNoSuchImage type.errorþ*""".ErrMissingRepo type.errorþ*2"".ErrMissingOutputStream type.errorþ*,"".ErrMultipleContexts type.errorþ*,"".ErrMustSpecifyNames type.errorþ*4"".ErrNetworkAlreadyExists type.errorþ""".statictmp_0068`type.[3]string`   &go.string.".docker"@ .go.string."config.json"þ""".statictmp_0073@type.[2]string@ +  ,go.string.".dockercfg"þ""".statictmp_0222@type.[2]string@  (go.string."cert.pem"þ""".statictmp_0225@type.[2]string@  &go.string."key.pem"þ""".statictmp_0228@type.[2]string@  $go.string."ca.pem"þ""".statictmp_0552@type.[2]string@  &go.string.".docker"þ""".statictmp_0876 *type."".hijackOptionsþ""".statictmp_1200 *type."".hijackOptionsþ""".statictmp_1318*type."".streamOptionsþ""".statictmp_1373*type."".streamOptionsþ""".statictmp_1509 type.[1]string  go.string."."þ""".statictmp_1511@type.[2]string  2go.string.".dockerignore"þ""".statictmp_1545@type.[2]string@  go.string."."þ""".statictmp_1558@type.[2]string@   
2go.string.".dockerignore"þ*""".statictmp_1673p"type."".APIEvents  go.string."EOF"þ,"".initdone·type.uint8þP"".NewAuthConfigurationsFromDockerCfg·fJ"".NewAuthConfigurationsFromDockerCfgþos.Getenv·fos.Getenvþ:runtime.writebarrierstring·f4runtime.writebarrierstringþpath.Join·fpath.Joinþ&runtime.typ2Itab·f runtime.typ2Itabþos.Open·fos.Openþ6"".NewAuthConfigurations·f0"".NewAuthConfigurationsþ,runtime.throwreturn·f&runtime.throwreturnþ."".parseDockerConfig·f("".parseDockerConfigþ""".authConfigs·f"".authConfigsþ(runtime.newobject·f"runtime.newobjectþ6bytes.(*Buffer).ReadFrom·f0bytes.(*Buffer).ReadFromþ*runtime.panicslice·f$runtime.panicsliceþ4encoding/json.Unmarshal·f.encoding/json.Unmarshalþ:runtime.mapaccess2_faststr·f4runtime.mapaccess2_faststrþ$runtime.makemap·fruntime.makemapþ4runtime.writebarrierptr·f.runtime.writebarrierptrþ,runtime.mapiterinit·f&runtime.mapiterinitþ,runtime.mapiternext·f&runtime.mapiternextþVencoding/base64.(*Encoding).DecodeString·fPencoding/base64.(*Encoding).DecodeStringþ8runtime.slicebytetostring·f2runtime.slicebytetostringþ strings.Split·fstrings.Splitþ*runtime.panicindex·f$runtime.panicindexþ*runtime.mapassign1·f$runtime.mapassign1þ2"".(*Client).AuthCheck·f,"".(*Client).AuthCheckþfmt.Errorf·ffmt.Errorfþ$"".(*Client).do·f"".(*Client).doþ$runtime.convT2E·fruntime.convT2Eþ8runtime.writebarrieriface·f2runtime.writebarrierifaceþ,"".(*Change).String·f&"".(*Change).Stringþfmt.Sprintf·ffmt.Sprintfþ&"".NewAPIVersion·f "".NewAPIVersionþ&strings.Contains·f 
strings.Containsþ(runtime.makeslice·f"runtime.makesliceþstrconv.Atoi·fstrconv.Atoiþ."".APIVersion.String·f("".APIVersion.Stringþstrconv.Itoa·fstrconv.Itoaþ0runtime.concatstring2·f*runtime.concatstring2þ2"".APIVersion.LessThan·f,"".APIVersion.LessThanþ0"".APIVersion.compare·f*"".APIVersion.compareþD"".APIVersion.LessThanOrEqualTo·f>"".APIVersion.LessThanOrEqualToþ8"".APIVersion.GreaterThan·f2"".APIVersion.GreaterThanþJ"".APIVersion.GreaterThanOrEqualTo·fD"".APIVersion.GreaterThanOrEqualToþ"".NewClient·f"".NewClientþ0"".NewVersionedClient·f*"".NewVersionedClientþ$"".NewTLSClient·f"".NewTLSClientþ6"".NewVersionedTLSClient·f0"".NewVersionedTLSClientþ6"".NewTLSClientFromBytes·f0"".NewTLSClientFromBytesþH"".NewVersionedTLSClientFromBytes·fB"".NewVersionedTLSClientFromBytesþ&"".parseEndpoint·f "".parseEndpointþ8runtime.writebarrierslice·f2runtime.writebarriersliceþ8"".NewVersionnedTLSClient·f2"".NewVersionnedTLSClientþ*io/ioutil.ReadFile·f$io/ioutil.ReadFileþ,"".NewClientFromEnv·f&"".NewClientFromEnvþ>"".NewVersionedClientFromEnv·f8"".NewVersionedClientFromEnvþ$"".getDockerEnv·f"".getDockerEnvþ"strings.SplitN·fstrings.SplitNþ*path/filepath.Join·f$path/filepath.Joinþ2crypto/tls.X509KeyPair·f,crypto/tls.X509KeyPairþ4runtime.writebarrierfat·f.runtime.writebarrierfatþZcrypto/x509.(*CertPool).AppendCertsFromPEM·fTcrypto/x509.(*CertPool).AppendCertsFromPEMþ>"".(*Client).checkAPIVersion·f8"".(*Client).checkAPIVersionþR"".(*Client).getServerAPIVersionString·fL"".(*Client).getServerAPIVersionStringþ("".(*Client).Ping·f""".(*Client).Pingþ:runtime.mapaccess1_faststr·f4runtime.mapaccess1_faststrþ*runtime.assertE2T2·f$runtime.assertE2T2þ0encoding/json.Marshal·f*encoding/json.Marshalþ,runtime.deferreturn·f&runtime.deferreturnþ&runtime.eqstring·f 
runtime.eqstringþ,"".(*Client).getURL·f&"".(*Client).getURLþ,net/http.NewRequest·f&net/http.NewRequestþ,net/http.Header.Set·f&net/http.Header.Setþnet.Dial·fnet.Dialþ(runtime.deferproc·f"runtime.deferprocþ$runtime.convI2I·fruntime.convI2Iþ$bufio.NewReader·fbufio.NewReaderþ8net/http.(*Request).Write·f2net/http.(*Request).Writeþ0net/http.ReadResponse·f*net/http.ReadResponseþ0net/http.(*Client).Do·f*net/http.(*Client).Doþ(io/ioutil.ReadAll·f"io/ioutil.ReadAllþ,"".(*Client).stream·f&"".(*Client).streamþtime.Now·ftime.Nowþ time.Time.Add·ftime.Time.Addþ,net/http.Header.Get·f&net/http.Header.Getþio.Copy·fio.CopyþDencoding/json.(*Decoder).Decode·f>encoding/json.(*Decoder).Decodeþ$runtime.ifaceeq·fruntime.ifaceeqþfmt.Fprint·ffmt.Fprintþfmt.Fprintf·ffmt.Fprintfþfmt.Fprintln·ffmt.Fprintlnþ´github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdCopy·f®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdCopyþ,"".(*Client).hijack·f&"".(*Client).hijackþ"".tlsDial·f"".tlsDialþDnet/http/httputil.NewClientConn·f>net/http/httputil.NewClientConnþPnet/http/httputil.(*ClientConn).Close·fJnet/http/httputil.(*ClientConn).CloseþJnet/http/httputil.(*ClientConn).Do·fDnet/http/httputil.(*ClientConn).Doþ(runtime.chansend1·f"runtime.chansend1þ(runtime.chanrecv1·f"runtime.chanrecv1þRnet/http/httputil.(*ClientConn).Hijack·fLnet/http/httputil.(*ClientConn).Hijackþ&runtime.makechan·f 
runtime.makechanþ"".func·001·f"".func·001þ$runtime.newproc·fruntime.newprocþ"".func·002·f"".func·002þ0net/url.(*URL).String·f*net/url.(*URL).Stringþ(strings.TrimRight·f"strings.TrimRightþ""".queryString·f"".queryStringþ$reflect.ValueOf·freflect.ValueOfþ*reflect.Value.Kind·f$reflect.Value.Kindþ*reflect.Value.Elem·f$reflect.Value.Elemþ2reflect.Value.NumField·f,reflect.Value.NumFieldþ*reflect.Value.Type·f$reflect.Value.Typeþ0reflect.StructTag.Get·f*reflect.StructTag.Getþ$strings.ToLower·fstrings.ToLowerþ,reflect.Value.Field·f&reflect.Value.Fieldþ2"".addQueryStringValue·f,"".addQueryStringValueþ0net/url.Values.Encode·f*net/url.Values.Encodeþ*reflect.Value.Bool·f$reflect.Value.Boolþ(runtime.growslice·f"runtime.growsliceþ(reflect.Value.Int·f"reflect.Value.Intþ(strconv.FormatInt·f"strconv.FormatIntþ,reflect.Value.Float·f&reflect.Value.Floatþ,strconv.FormatFloat·f&strconv.FormatFloatþ.reflect.Value.String·f(reflect.Value.Stringþ,reflect.Value.IsNil·f&reflect.Value.IsNilþ4reflect.Value.Interface·f.reflect.Value.Interfaceþ0reflect.Value.MapKeys·f*reflect.Value.MapKeysþ(reflect.Value.Len·f"reflect.Value.Lenþ,reflect.Value.Index·f&reflect.Value.Indexþ"".newError·f"".newErrorþ("".(*Error).Error·f""".(*Error).Errorþ net/url.Parse·fnet/url.Parseþ(runtime.cmpstring·f"runtime.cmpstringþ(net.SplitHostPort·f"net.SplitHostPortþ*runtime.assertI2T2·f$runtime.assertI2T2þ&strconv.ParseInt·f strconv.ParseIntþ4"".getDefaultDockerHost·f."".getDefaultDockerHostþ¬github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.Get·f¦github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.Getþ(path/filepath.Abs·f"path/filepath.Absþ°github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.ValidateHost·fªgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.ValidateHostþ<"".(*Client).ListContainers·f6"".(*Client).ListContainersþ"".Port.Port·f"".Port.Portþ 
"".Port.Proto·f"".Port.Protoþ*"".(*State).String·f$"".(*State).Stringþ time.Time.Sub·ftime.Time.SubþN"".(*NetworkSettings).PortMappingAPI·fH"".(*NetworkSettings).PortMappingAPIþ"".parsePort·f"".parsePortþ(strconv.ParseUint·f"strconv.ParseUintþ>"".(*Client).RenameContainer·f8"".(*Client).RenameContainerþ0runtime.concatstring3·f*runtime.concatstring3þ@"".(*Client).InspectContainer·f:"".(*Client).InspectContainerþ@"".(*Client).ContainerChanges·f:"".(*Client).ContainerChangesþ>"".(*Client).CreateContainer·f8"".(*Client).CreateContainerþ&"".AlwaysRestart·f "".AlwaysRestartþ,"".RestartOnFailure·f&"".RestartOnFailureþ$"".NeverRestart·f"".NeverRestartþ<"".(*Client).StartContainer·f6"".(*Client).StartContainerþ:"".(*Client).StopContainer·f4"".(*Client).StopContainerþ@"".(*Client).RestartContainer·f:"".(*Client).RestartContainerþ<"".(*Client).PauseContainer·f6"".(*Client).PauseContainerþ@"".(*Client).UnpauseContainer·f:"".(*Client).UnpauseContainerþ8"".(*Client).TopContainer·f2"".(*Client).TopContainerþ*"".(*Client).Stats·f$"".(*Client).Statsþio.Pipe·fio.Pipeþ"".func·003·f"".func·003þ"".func·004·f"".func·004þ(runtime.closechan·f"runtime.closechanþ"".func·005·f"".func·005þ:"".(*Client).KillContainer·f4"".(*Client).KillContainerþ0runtime.concatstring4·f*runtime.concatstring4þ>"".(*Client).RemoveContainer·f8"".(*Client).RemoveContainerþB"".(*Client).CopyFromContainer·f<"".(*Client).CopyFromContainerþ:"".(*Client).WaitContainer·f4"".(*Client).WaitContainerþ>"".(*Client).CommitContainer·f8"".(*Client).CommitContainerþB"".(*Client).AttachToContainer·f<"".(*Client).AttachToContainerþ("".(*Client).Logs·f""".(*Client).LogsþD"".(*Client).ResizeContainerTTY·f>"".(*Client).ResizeContainerTTYþ>"".(*Client).ExportContainer·f8"".(*Client).ExportContainerþ<"".(*NoSuchContainer).Error·f6"".(*NoSuchContainer).ErrorþL"".(*ContainerAlreadyRunning).Error·fF"".(*ContainerAlreadyRunning).ErrorþD"".(*ContainerNotRunning).Error·f>"".(*ContainerNotRunning).Errorþ "".(*Env).Get·f"".(*Env).Getþ 
"".(*Env).Map·f"".(*Env).Mapþ&"".(*Env).Exists·f "".(*Env).Existsþ("".(*Env).GetBool·f""".(*Env).GetBoolþstrings.Trim·fstrings.Trimþ("".(*Env).SetBool·f""".(*Env).SetBoolþ&"".(*Env).GetInt·f "".(*Env).GetIntþ*"".(*Env).GetInt64·f$"".(*Env).GetInt64þ&"".(*Env).SetInt·f "".(*Env).SetIntþ*"".(*Env).SetInt64·f$"".(*Env).SetInt64þ("".(*Env).GetJSON·f""".(*Env).GetJSONþ8runtime.stringtoslicebyte·f2runtime.stringtoslicebyteþ("".(*Env).SetJSON·f""".(*Env).SetJSONþ("".(*Env).GetList·f""".(*Env).GetListþ("".(*Env).SetList·f""".(*Env).SetListþ "".(*Env).Set·f"".(*Env).Setþ&"".(*Env).Decode·f "".(*Env).Decodeþ("".(*Env).SetAuto·f""".(*Env).SetAutoþ@"".(*Client).AddEventListener·f:"".(*Client).AddEventListenerþN"".(*eventMonitoringState).isEnabled·fH"".(*eventMonitoringState).isEnabledþf"".(*eventMonitoringState).enableEventMonitoring·f`"".(*eventMonitoringState).enableEventMonitoringþR"".(*eventMonitoringState).addListener·fL"".(*eventMonitoringState).addListenerþF"".(*Client).RemoveEventListener·f@"".(*Client).RemoveEventListenerþX"".(*eventMonitoringState).removeListener·fR"".(*eventMonitoringState).removeListenerþh"".(*eventMonitoringState).disableEventMonitoring·fb"".(*eventMonitoringState).disableEventMonitoringþ.sync.(*RWMutex).Lock·f(sync.(*RWMutex).Lockþ2sync.(*RWMutex).Unlock·f,sync.(*RWMutex).Unlockþ("".listenerExists·f""".listenerExistsþ0sync.(*WaitGroup).Add·f*sync.(*WaitGroup).AddþX"".(*eventMonitoringState).closeListeners·fR"".(*eventMonitoringState).closeListenersþV"".(*eventMonitoringState).monitorEvents·fP"".(*eventMonitoringState).monitorEventsþ2sync.(*WaitGroup).Wait·f,sync.(*WaitGroup).WaitþR"".(*eventMonitoringState).noListeners·fL"".(*eventMonitoringState).noListenersþtime.Sleep·ftime.Sleepþ\"".(*eventMonitoringState).connectWithRetry·fV"".(*eventMonitoringState).connectWithRetryþtime.After·ftime.Afterþ(runtime.newselect·f"runtime.newselectþ,runtime.selectrecv2·f&runtime.selectrecv2þX"".(*eventMonitoringState).updateLastSeen·fR"".(*eventMonitoringState).up
dateLastSeenþN"".(*eventMonitoringState).sendEvent·fH"".(*eventMonitoringState).sendEventþ*runtime.selectrecv·f$runtime.selectrecvþ"".func·006·f"".func·006þ&runtime.selectgo·f runtime.selectgoþ0sync/atomic.LoadInt64·f*sync/atomic.LoadInt64þ6"".(*Client).eventHijack·f0"".(*Client).eventHijackþmath.Pow·fmath.Powþ0sync.(*RWMutex).RLock·f*sync.(*RWMutex).RLockþ4sync.(*RWMutex).RUnlock·f.sync.(*RWMutex).RUnlockþ2sync.(*WaitGroup).Done·f,sync.(*WaitGroup).Doneþ2sync/atomic.StoreInt64·f,sync/atomic.StoreInt64þ$crypto/tls.Dial·fcrypto/tls.Dialþ"".func·007·f"".func·007þ4"".(*Client).CreateExec·f."".(*Client).CreateExecþ2"".(*Client).StartExec·f,"".(*Client).StartExecþ:"".(*Client).ResizeExecTTY·f4"".(*Client).ResizeExecTTYþ6"".(*Client).InspectExec·f0"".(*Client).InspectExecþ2"".(*NoSuchExec).Error·f,"".(*NoSuchExec).Errorþ4"".(*Client).ListImages·f."".(*Client).ListImagesþ8"".(*Client).ImageHistory·f2"".(*Client).ImageHistoryþ6"".(*Client).RemoveImage·f0"".(*Client).RemoveImageþF"".(*Client).RemoveImageExtended·f@"".(*Client).RemoveImageExtendedþ8"".(*Client).InspectImage·f2"".(*Client).InspectImageþ6runtime.writebarrierfat3·f0runtime.writebarrierfat3þ2"".(*Client).PushImage·f,"".(*Client).PushImageþ*"".headersWithAuth·f$"".headersWithAuthþ2"".(*Client).PullImage·f,"".(*Client).PullImageþ6"".(*Client).createImage·f0"".(*Client).createImageþ2"".(*Client).LoadImage·f,"".(*Client).LoadImageþ6"".(*Client).ExportImage·f0"".(*Client).ExportImageþ8"".(*Client).ExportImages·f2"".(*Client).ExportImagesþ6"".(*Client).ImportImage·f0"".(*Client).ImportImageþ"".isURL·f"".isURLþ4"".(*Client).BuildImage·f."".(*Client).BuildImageþ*"".createTarStream·f$"".createTarStreamþ0"".(*Client).TagImage·f*"".(*Client).TagImageþ*runtime.efacethash·f$runtime.efacethashþ,runtime.assertE2TOK·f&runtime.assertE2TOKþDencoding/json.(*Encoder).Encode·f>encoding/json.(*Encoder).EncodeþZencoding/base64.(*Encoding).EncodeToString·fTencoding/base64.(*Encoding).EncodeToStringþ8"".(*Client).SearchImages·f2"".(*Clie
nt).SearchImagesþ."".(*Client).Version·f("".(*Client).Versionþ("".(*Client).Info·f""".(*Client).Infoþ0"".ParseRepositoryTag·f*"".ParseRepositoryTagþ(strings.LastIndex·f"strings.LastIndexþ8"".(*Client).ListNetworks·f2"".(*Client).ListNetworksþ6"".(*Client).NetworkInfo·f0"".(*Client).NetworkInfoþ:"".(*Client).CreateNetwork·f4"".(*Client).CreateNetworkþ8"".(*NoSuchNetwork).Error·f2"".(*NoSuchNetwork).Errorþ."".parseDockerignore·f("".parseDockerignoreþ¸github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.Matches·f²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.Matchesþ$runtime.convI2E·fruntime.convI2Eþ<"".validateContextDirectory·f6"".validateContextDirectoryþÂgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.TarWithOptions·f¼github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.TarWithOptionsþ"".func·008·f"".func·008þ*path/filepath.Walk·f$path/filepath.Walkþ os.IsNotExist·fos.IsNotExistþ@"".(*tlsClientCon).CloseWrite·f:"".(*tlsClientCon).CloseWriteþ*runtime.assertI2I2·f$runtime.assertI2I2þ."".tlsDialWithDialer·f("".tlsDialWithDialerþ"".func·009·f"".func·009þ"time.AfterFunc·ftime.AfterFuncþ*net.(*Dialer).Dial·f$net.(*Dialer).Dialþ>crypto/tls.(*Conn).Handshake·f8crypto/tls.(*Conn).Handshakeþ"".func·010·f"".func·010þ(runtime.assertI2I·f"runtime.assertI2Iþ.runtime.selectnbrecv·f(runtime.selectnbrecvþ2io.(*PipeReader).Close·f,io.(*PipeReader).Closeþ2io.(*PipeWriter).Close·f,io.(*PipeWriter).Closeþ(path/filepath.Rel·f"path/filepath.Relþ$os.IsPermission·fos.IsPermissionþ&os.(*File).Close·f 
os.(*File).Closeþ"".init·f"".initþ(runtime.throwinit·f"runtime.throwinitþ²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.init·f¬github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils.initþ®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.init·f¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive.initþsync.init·fsync.initþmath.init·fmath.initþ®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.init·f¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.initþ®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.init·f¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir.initþ github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.init·fšgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts.initþtime.init·ftime.initþstrconv.init·fstrconv.initþruntime.init·fruntime.initþreflect.init·freflect.initþ*path/filepath.init·f$path/filepath.initþnet/url.init·fnet/url.initþ2net/http/httputil.init·f,net/http/httputil.initþ net/http.init·fnet/http.initþnet.init·fnet.initþ"io/ioutil.init·fio/ioutil.initþ&crypto/x509.init·f crypto/x509.initþ$crypto/tls.init·fcrypto/tls.initþbufio.init·fbufio.initþstrings.init·fstrings.initþpath.init·fpath.initþos.init·fos.initþio.init·fio.initþfmt.init·ffmt.initþ*encoding/json.init·f$encoding/json.initþ.encoding/base64.init·f(encoding/base64.initþbytes.init·fbytes.initþerrors.New·ferrors.NewþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þFtype..hashfunc."".AuthConfiguration>type..hash."".AuthConfigurationþBtype..eqfunc."".AuthConfiguration:type..eq."".AuthConfigurationþ 
&type..alg.[8]string0bruntime.gcbits.0x48484848484848480000000000000000P*go.string."[8]string"p.go.weak.type.*[8]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[8]string/[8]stringtype.[8]stringþLgo.string."[]docker.AuthConfiguration"`V[]docker.AuthConfiguration Lgo.string."[]docker.AuthConfiguration"þ6type.[]"".AuthConfiguration  Ã+ü„   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PLgo.string."[]docker.AuthConfiguration"pHgo.weak.type.*[]"".AuthConfiguration€"runtime.zerovalue2type."".AuthConfigurationþzgo.typelink.[]docker.AuthConfiguration/[]"".AuthConfiguration6type.[]"".AuthConfigurationþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·65526a5f07004f02424fe51b799cdd23  +þTgclocals·fa7203fd5ed88aea99b7be572f707eb0 þLtype..hashfunc.[8]"".AuthConfigurationDtype..hash.[8]"".AuthConfigurationþHtype..eqfunc.[8]"".AuthConfiguration@type..eq.[8]"".AuthConfigurationþBtype..alg.[8]"".AuthConfiguration Ltype..hashfunc.[8]"".AuthConfigurationHtype..eqfunc.[8]"".AuthConfigurationþ,@type..gc.[8]"".AuthConfigurationBþHtype..gcprog.[8]"".AuthConfigurationffþNgo.string."[8]docker.AuthConfiguration"`X[8]docker.AuthConfiguration Ngo.string."[8]docker.AuthConfiguration"þ8type.[8]"".AuthConfigurationÀÀ¬ÓX]Q Btype..alg.[8]"".AuthConfiguration0@type..gc.[8]"".AuthConfiguration@Htype..gcprog.[8]"".AuthConfigurationPNgo.string."[8]docker.AuthConfiguration"pJgo.weak.type.*[8]"".AuthConfiguration€"runtime.zerovalue2type."".AuthConfiguration 6type.[]"".AuthConfigurationþ~go.typelink.[8]docker.AuthConfiguration/[8]"".AuthConfiguration8type.[8]"".AuthConfigurationþngo.string."*map.bucket[string]docker.AuthConfiguration"€x+*map.bucket[string]docker.AuthConfiguration ngo.string."*map.bucket[string]docker.AuthConfiguration"þXtype.*map.bucket[string]"".AuthConfiguration  iÇg/6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."*map.bucket[string]docker.AuthConfiguration"pjgo.weak.type.**map.bucket[string]"".AuthConfiguration€"runtime.zerovalueVtype.map.bucket[string]"".AuthConfigurationþ,^type..gc.map.bucket[string]"".AuthConfigurationTþftype..gcprog.map.bucket[string]"".AuthConfiguration22™™™™ffþlgo.string."map.bucket[string]docker.AuthConfiguration"€v*map.bucket[string]docker.AuthConfiguration lgo.string."map.bucket[string]docker.AuthConfiguration"þ go.string."keys"0*keys go.string."keys"þ$go.string."values"0.values $go.string."values"þ(go.string."overflow"@2overflow (go.string."overflow"þVtype.map.bucket[string]"".AuthConfiguration°°Ô»ûºYˆˆ à runtime.algarray0^type..gc.map.bucket[string]"".AuthConfiguration@ftype..gcprog.map.bucket[string]"".AuthConfigurationPlgo.string."map.bucket[string]docker.AuthConfiguration"phgo.weak.type.*map.bucket[string]"".AuthConfiguration€"runtime.zerovalueÀVtype.map.bucket[string]"".AuthConfigurationÀ go.string."keys"àtype.[8]string$go.string."values"°8type.[8]"".AuthConfigurationà(go.string."overflow"€Xtype.*map.bucket[string]"".AuthConfigurationþbruntime.gcbits.0x44844800000000000000000000000000 D„Hþfgo.string."map.hdr[string]docker.AuthConfiguration"pp'map.hdr[string]docker.AuthConfiguration fgo.string."map.hdr[string]docker.AuthConfiguration"þ&go.string."buckets"00buckets &go.string."buckets"þ,go.string."oldbuckets"@6 +oldbuckets ,go.string."oldbuckets"þPtype.map.hdr[string]"".AuthConfigurationàà0¸çR  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000Pfgo.string."map.hdr[string]docker.AuthConfiguration"pbgo.weak.type.*map.hdr[string]"".AuthConfiguration€"runtime.zerovalueÀPtype.map.hdr[string]"".AuthConfigurationÀ&go.string."buckets"àXtype.*map.bucket[string]"".AuthConfiguration,go.string."oldbuckets"°Xtype.*map.bucket[string]"".AuthConfigurationþ^go.string."map[string]docker.AuthConfiguration"ph#map[string]docker.AuthConfiguration 
^go.string."map[string]docker.AuthConfiguration"þHtype.map[string]"".AuthConfigurationÜÜÊé:c5@ € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."map[string]docker.AuthConfiguration"pZgo.weak.type.*map[string]"".AuthConfiguration€"runtime.zerovaluetype.string 2type."".AuthConfiguration°Vtype.map.bucket[string]"".AuthConfigurationÀPtype.map.hdr[string]"".AuthConfigurationþžgo.typelink.map[string]docker.AuthConfiguration/map[string]"".AuthConfigurationHtype.map[string]"".AuthConfigurationþJgo.string."docker.AuthConfigurations"`Tdocker.AuthConfigurations Jgo.string."docker.AuthConfigurations"þ&go.string."Configs"00Configs &go.string."Configs"þ8go.string."json:\"configs\""@>json:"configs" 8go.string."json:\"configs\""þgo.typelink.[3]string/[3]stringtype.[3]stringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[2]string(type..hash.[2]stringþ,type..eqfunc.[2]string$type..eq.[2]stringþ&type..alg.[2]string 0type..hashfunc.[2]string,type..eqfunc.[2]stringþbruntime.gcbits.0x48480000000000000000000000000000 HHþ*go.string."[2]string"@4 [2]string *go.string."[2]string"þtype.[2]stringÀÀ PXåé &type..alg.[2]string0bruntime.gcbits.0x48480000000000000000000000000000P*go.string."[2]string"p.go.weak.type.*[2]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[2]string/[2]stringtype.[2]stringþ,go.string."*[3]string"@6 +*[3]string ,go.string."*[3]string"þtype.*[3]string   ++é 6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[3]string"p0go.weak.type.**[3]string€"runtime.zerovaluetype.[3]stringþ,go.string."*[2]string"@6 +*[2]string ,go.string."*[2]string"þtype.*[2]string   f<6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[2]string"p0go.weak.type.**[2]string€"runtime.zerovaluetype.[2]stringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.string."docker.dockerConfig"PHdocker.dockerConfig >go.string."docker.dockerConfig"þ go.string."Auth"0*Auth go.string."Auth"þ2go.string."json:\"auth\""@8 json:"auth" 2go.string."json:\"auth\""þ4go.string."json:\"email\""@: json:"email" 4go.string."json:\"email\""þ0go.string."dockerConfig"@: dockerConfig 0go.string."dockerConfig"þ(type."".dockerConfig°° 0µ®µ 2type..alg."".dockerConfig0bruntime.gcbits.0x48480000000000000000000000000000P>go.string."docker.dockerConfig"p*type.*"".dockerConfig€"runtime.zerovalueÀ(type."".dockerConfigÀ go.string."Auth"àtype.stringð2go.string."json:\"auth\"""go.string."Email"°type.stringÀ4go.string."json:\"email\""`à(type."".dockerConfigà0go.string."dockerConfig"ð"go.importpath."".€°(type."".dockerConfigþBgo.string."[]docker.dockerConfig"PL[]docker.dockerConfig Bgo.string."[]docker.dockerConfig"þ,type.[]"".dockerConfig  =hl   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PBgo.string."[]docker.dockerConfig"p>go.weak.type.*[]"".dockerConfig€"runtime.zerovalue(type."".dockerConfigþfgo.typelink.[]docker.dockerConfig/[]"".dockerConfig,type.[]"".dockerConfigþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·65526a5f07004f02424fe51b799cdd23  +þTgclocals·fa7203fd5ed88aea99b7be572f707eb0 þBtype..hashfunc.[8]"".dockerConfig:type..hash.[8]"".dockerConfigþ>type..eqfunc.[8]"".dockerConfig6type..eq.[8]"".dockerConfigþ8type..alg.[8]"".dockerConfig Btype..hashfunc.[8]"".dockerConfig>type..eqfunc.[8]"".dockerConfigþbruntime.gcbits.0x48484848484848484848484848484848 HHHHHHHHHHHHHHHHþDgo.string."[8]docker.dockerConfig"PN[8]docker.dockerConfig 
Dgo.string."[8]docker.dockerConfig"þ.type.[8]"".dockerConfigÀÀöeió 8type..alg.[8]"".dockerConfig0bruntime.gcbits.0x48484848484848484848484848484848PDgo.string."[8]docker.dockerConfig"p@go.weak.type.*[8]"".dockerConfig€"runtime.zerovalue(type."".dockerConfig ,type.[]"".dockerConfigþjgo.typelink.[8]docker.dockerConfig/[8]"".dockerConfig.type.[8]"".dockerConfigþdgo.string."*map.bucket[string]docker.dockerConfig"pn&*map.bucket[string]docker.dockerConfig dgo.string."*map.bucket[string]docker.dockerConfig"þNtype.*map.bucket[string]"".dockerConfig  êíÄÀ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."*map.bucket[string]docker.dockerConfig"p`go.weak.type.**map.bucket[string]"".dockerConfig€"runtime.zerovalueLtype.map.bucket[string]"".dockerConfigþ,Ttype..gc.map.bucket[string]"".dockerConfig4þ\type..gcprog.map.bucket[string]"".dockerConfig00™™™™fþbgo.string."map.bucket[string]docker.dockerConfig"pl%map.bucket[string]docker.dockerConfig bgo.string."map.bucket[string]docker.dockerConfig"þLtype.map.bucket[string]"".dockerConfig°°®)tYˆˆ à runtime.algarray0Ttype..gc.map.bucket[string]"".dockerConfig@\type..gcprog.map.bucket[string]"".dockerConfigPbgo.string."map.bucket[string]docker.dockerConfig"p^go.weak.type.*map.bucket[string]"".dockerConfig€"runtime.zerovalueÀLtype.map.bucket[string]"".dockerConfigÀ go.string."keys"àtype.[8]string$go.string."values"°.type.[8]"".dockerConfigà(go.string."overflow"€Ntype.*map.bucket[string]"".dockerConfigþ\go.string."map.hdr[string]docker.dockerConfig"pf"map.hdr[string]docker.dockerConfig \go.string."map.hdr[string]docker.dockerConfig"þFtype.map.hdr[string]"".dockerConfigàà0Kå  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000P\go.string."map.hdr[string]docker.dockerConfig"pXgo.weak.type.*map.hdr[string]"".dockerConfig€"runtime.zerovalueÀFtype.map.hdr[string]"".dockerConfigÀ&go.string."buckets"àNtype.*map.bucket[string]"".dockerConfig,go.string."oldbuckets"°Ntype.*map.bucket[string]"".dockerConfigþTgo.string."map[string]docker.dockerConfig"`^map[string]docker.dockerConfig Tgo.string."map[string]docker.dockerConfig"þ>type.map[string]"".dockerConfigÜÜH¦Â5  € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."map[string]docker.dockerConfig"pPgo.weak.type.*map[string]"".dockerConfig€"runtime.zerovaluetype.string (type."".dockerConfig°Ltype.map.bucket[string]"".dockerConfigÀFtype.map.hdr[string]"".dockerConfigþŠgo.typelink.map[string]docker.dockerConfig/map[string]"".dockerConfig>type.map[string]"".dockerConfigþXgo.string."[]map[string]docker.dockerConfig"pb []map[string]docker.dockerConfig Xgo.string."[]map[string]docker.dockerConfig"þBtype.[]map[string]"".dockerConfig  ¡¡   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PXgo.string."[]map[string]docker.dockerConfig"pTgo.weak.type.*[]map[string]"".dockerConfig€"runtime.zerovalue>type.map[string]"".dockerConfigþ’go.typelink.[]map[string]docker.dockerConfig/[]map[string]"".dockerConfigBtype.[]map[string]"".dockerConfigþbruntime.gcbits.0x88888888000000000000000000000000 ˆˆˆˆþZgo.string."[8]map[string]docker.dockerConfig"pd![8]map[string]docker.dockerConfig Zgo.string."[8]map[string]docker.dockerConfig"þDtype.[8]map[string]"".dockerConfigÀÀ@Þû¼‰ à runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000PZgo.string."[8]map[string]docker.dockerConfig"pVgo.weak.type.*[8]map[string]"".dockerConfig€"runtime.zerovalue>type.map[string]"".dockerConfig 
Btype.[]map[string]"".dockerConfigþ–go.typelink.[8]map[string]docker.dockerConfig/[8]map[string]"".dockerConfigDtype.[8]map[string]"".dockerConfigþzgo.string."*map.bucket[string]map[string]docker.dockerConfig"„1*map.bucket[string]map[string]docker.dockerConfig zgo.string."*map.bucket[string]map[string]docker.dockerConfig"þdtype.*map.bucket[string]map[string]"".dockerConfig  aØ®µ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pzgo.string."*map.bucket[string]map[string]docker.dockerConfig"pvgo.weak.type.**map.bucket[string]map[string]"".dockerConfig€"runtime.zerovaluebtype.map.bucket[string]map[string]"".dockerConfigþbruntime.gcbits.0x84848484848484848488888888000000 „„„„„„„„„ˆˆˆˆþxgo.string."map.bucket[string]map[string]docker.dockerConfig"‚0map.bucket[string]map[string]docker.dockerConfig xgo.string."map.bucket[string]map[string]docker.dockerConfig"þbtype.map.bucket[string]map[string]"".dockerConfig°°Ð&ÎÏÉˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000Pxgo.string."map.bucket[string]map[string]docker.dockerConfig"ptgo.weak.type.*map.bucket[string]map[string]"".dockerConfig€"runtime.zerovalueÀbtype.map.bucket[string]map[string]"".dockerConfigÀ go.string."keys"àtype.[8]string$go.string."values"°Dtype.[8]map[string]"".dockerConfigà(go.string."overflow"€dtype.*map.bucket[string]map[string]"".dockerConfigþrgo.string."map.hdr[string]map[string]docker.dockerConfig"€|-map.hdr[string]map[string]docker.dockerConfig rgo.string."map.hdr[string]map[string]docker.dockerConfig"þ\type.map.hdr[string]map[string]"".dockerConfigàà0+;Ÿ  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000Prgo.string."map.hdr[string]map[string]docker.dockerConfig"pngo.weak.type.*map.hdr[string]map[string]"".dockerConfig€"runtime.zerovalueÀ\type.map.hdr[string]map[string]"".dockerConfigÀ&go.string."buckets"àdtype.*map.bucket[string]map[string]"".dockerConfig,go.string."oldbuckets"°dtype.*map.bucket[string]map[string]"".dockerConfigþjgo.string."map[string]map[string]docker.dockerConfig"€t)map[string]map[string]docker.dockerConfig jgo.string."map[string]map[string]docker.dockerConfig"þTtype.map[string]map[string]"".dockerConfigÜÜOÈ95Ð € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."map[string]map[string]docker.dockerConfig"pfgo.weak.type.*map[string]map[string]"".dockerConfig€"runtime.zerovaluetype.string >type.map[string]"".dockerConfig°btype.map.bucket[string]map[string]"".dockerConfigÀ\type.map.hdr[string]map[string]"".dockerConfigþ¶go.typelink.map[string]map[string]docker.dockerConfig/map[string]map[string]"".dockerConfigTtype.map[string]map[string]"".dockerConfigþlgo.string."*map[string]map[string]docker.dockerConfig"€v**map[string]map[string]docker.dockerConfig lgo.string."*map[string]map[string]docker.dockerConfig"þVtype.*map[string]map[string]"".dockerConfig  Ýu6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."*map[string]map[string]docker.dockerConfig"phgo.weak.type.**map[string]map[string]"".dockerConfig€"runtime.zerovalueTtype.map[string]map[string]"".dockerConfigþVgo.string."*map[string]docker.dockerConfig"``*map[string]docker.dockerConfig Vgo.string."*map[string]docker.dockerConfig"þ@type.*map[string]"".dockerConfig  ¥Æò6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PVgo.string."*map[string]docker.dockerConfig"pRgo.weak.type.**map[string]"".dockerConfig€"runtime.zerovalue>type.map[string]"".dockerConfigþ&go.string."[]uint8"00[]uint8 &go.string."[]uint8"þtype.[]uint8  ß~.8   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P&go.string."[]uint8"p*go.weak.type.*[]uint8€"runtime.zerovaluetype.uint8þ6go.typelink.[]uint8/[]uint8type.[]uint8þ^go.string."*map.hdr[string]docker.dockerConfig"ph#*map.hdr[string]docker.dockerConfig ^go.string."*map.hdr[string]docker.dockerConfig"þHtype.*map.hdr[string]"".dockerConfig  B²®Õ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."*map.hdr[string]docker.dockerConfig"pZgo.weak.type.**map.hdr[string]"".dockerConfig€"runtime.zerovalueFtype.map.hdr[string]"".dockerConfigþ*go.string."[]uintptr"@4 []uintptr *go.string."[]uintptr"þtype.[]uintptr  »3À]   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P*go.string."[]uintptr"p.go.weak.type.*[]uintptr€"runtime.zerovaluetype.uintptrþ>go.typelink.[]uintptr/[]uintptrtype.[]uintptrþ^runtime.gcbits.0x000000000000000000000000000000 þ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþ^go.string."map.iter[string]docker.dockerConfig"ph#map.iter[string]docker.dockerConfig ^go.string."map.iter[string]docker.dockerConfig"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."t"0$t go.string."t"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þHtype.map.iter[string]"".dockerConfigððPîšb$ (0( à 
runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000P^go.string."map.iter[string]docker.dockerConfig"pZgo.weak.type.*map.iter[string]"".dockerConfig€"runtime.zerovalueÀHtype.map.iter[string]"".dockerConfigÀgo.string."key"àtype.*stringgo.string."val"°*type.*"".dockerConfigàgo.string."t"€type.*uint8°go.string."h"ÐHtype.*map.hdr[string]"".dockerConfig€&go.string."buckets" Ntype.*map.bucket[string]"".dockerConfigÐ go.string."bptr"ðNtype.*map.bucket[string]"".dockerConfig "go.string."other"Àtype.[4]uintptrþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ6type..hashfunc."".APIEvents.type..hash."".APIEventsþ2type..eqfunc."".APIEvents*type..eq."".APIEventsþ,type..alg."".APIEvents 6type..hashfunc."".APIEvents2type..eqfunc."".APIEventsþbruntime.gcbits.0x48484884848444000000000000000000 HHH„„„Dþ8go.string."docker.APIEvents"PBdocker.APIEvents 8go.string."docker.APIEvents"þ$go.string."Status"0.Status $go.string."Status"þ~go.string."json:\"Status,omitempty\" yaml:\"Status,omitempty\""€€/json:"Status,omitempty" yaml:"Status,omitempty" ~go.string."json:\"Status,omitempty\" yaml:\"Status,omitempty\""þgo.string."ID"0&ID go.string."ID"þngo.string."json:\"ID,omitempty\" yaml:\"ID,omitempty\""pp'json:"ID,omitempty" yaml:"ID,omitempty" ngo.string."json:\"ID,omitempty\" yaml:\"ID,omitempty\""þ go.string."From"0*From go.string."From"þvgo.string."json:\"From,omitempty\" yaml:\"From,omitempty\""€x+json:"From,omitempty" yaml:"From,omitempty" vgo.string."json:\"From,omitempty\" yaml:\"From,omitempty\""þ go.string."Time"0*Time go.string."Time"þvgo.string."json:\"Time,omitempty\" yaml:\"Time,omitempty\""€x+json:"Time,omitempty" yaml:"Time,omitempty" vgo.string."json:\"Time,omitempty\" yaml:\"Time,omitempty\""þ*go.string."APIEvents"@4 APIEvents *go.string."APIEvents"þ"type."".APIEventsÐÐ8™€èÛ 0, 
,type..alg."".APIEvents0bruntime.gcbits.0x48484884848444000000000000000000P8go.string."docker.APIEvents"p$type.*"".APIEvents€"runtime.zerovalueÀ"type."".APIEventsÀ$go.string."Status"àtype.stringð~go.string."json:\"Status,omitempty\" yaml:\"Status,omitempty\""go.string."ID"°type.stringÀngo.string."json:\"ID,omitempty\" yaml:\"ID,omitempty\""à go.string."From"€type.stringvgo.string."json:\"From,omitempty\" yaml:\"From,omitempty\""° go.string."Time"Ðtype.int64àvgo.string."json:\"Time,omitempty\" yaml:\"Time,omitempty\""`€"type."".APIEvents€*go.string."APIEvents""go.importpath."". Ð"type."".APIEventsþ:go.string."*docker.APIEvents"PD*docker.APIEvents :go.string."*docker.APIEvents"þ$type.*"".APIEvents  ¶¬6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.APIEvents"p6go.weak.type.**"".APIEvents€"runtime.zerovalue"type."".APIEventsþDgo.string."chan *docker.APIEvents"PNchan *docker.APIEvents Dgo.string."chan *docker.APIEvents"þ.type.chan *"".APIEvents°°P@ÖD2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."chan *docker.APIEvents"p@go.weak.type.*chan *"".APIEvents€"runtime.zerovalue$type.*"".APIEventsþjgo.typelink.chan *docker.APIEvents/chan *"".APIEvents.type.chan *"".APIEventsþ,go.string."chan error"@6 +chan error ,go.string."chan error"þtype.chan error°°"Èû]2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."chan error"p0go.weak.type.*chan error€"runtime.zerovaluetype.errorþBgo.typelink.chan error/chan errortype.chan errorþHgo.string."chan<- *docker.APIEvents"`Rchan<- *docker.APIEvents Hgo.string."chan<- *docker.APIEvents"þ2type.chan<- *"".APIEvents°° ½Q2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."chan<- *docker.APIEvents"pDgo.weak.type.*chan<- *"".APIEvents€"runtime.zerovalue$type.*"".APIEventsþrgo.typelink.chan<- *docker.APIEvents/chan<- *"".APIEvents2type.chan<- *"".APIEventsþLgo.string."[]chan<- *docker.APIEvents"`V[]chan<- 
*docker.APIEvents Lgo.string."[]chan<- *docker.APIEvents"þ6type.[]chan<- *"".APIEvents  %é¶=   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PLgo.string."[]chan<- *docker.APIEvents"pHgo.weak.type.*[]chan<- *"".APIEvents€"runtime.zerovalue2type.chan<- *"".APIEventsþzgo.typelink.[]chan<- *docker.APIEvents/[]chan<- *"".APIEvents6type.[]chan<- *"".APIEventsþbruntime.gcbits.0x44448484884844444448888844000000 DD„„ˆHDDDHˆˆDþNgo.string."docker.eventMonitoringState"`Xdocker.eventMonitoringState Ngo.string."docker.eventMonitoringState"þ&go.string."enabled"00enabled &go.string."enabled"þ(go.string."lastSeen"@2lastSeen (go.string."lastSeen"þ go.string."errC"0*errC go.string."errC"þ*go.string."listeners"@4 listeners *go.string."listeners"þ@go.string."eventMonitoringState"PJeventMonitoringState @go.string."eventMonitoringState"þ8type."".eventMonitoringStateÀÀh^}’ 08@HP4 à runtime.algarray0bruntime.gcbits.0x44448484884844444448888844000000PNgo.string."docker.eventMonitoringState"p:type.*"".eventMonitoringState€"runtime.zerovalueÀ8type."".eventMonitoringStateà"type.sync.RWMutex°&type.sync.WaitGroupà&go.string."enabled"ð"go.importpath."".€type.bool°(go.string."lastSeen"À"go.importpath."".Ðtype.*int64€go.string."C" .type.chan *"".APIEventsÐ go.string."errC"à"go.importpath."".ðtype.chan error *go.string."listeners"°"go.importpath."".À6type.[]chan<- *"".APIEvents`ð8type."".eventMonitoringStateð@go.string."eventMonitoringState"€"go.importpath."".À8type."".eventMonitoringStateþPgo.string."*docker.eventMonitoringState"`Z*docker.eventMonitoringState 
Pgo.string."*docker.eventMonitoringState"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·cd30d2bcfdea04ed7c49639580b4bd08þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þfgo.string."func(*docker.eventMonitoringState, int)"pp'func(*docker.eventMonitoringState, int) fgo.string."func(*docker.eventMonitoringState, int)"þPtype.func(*"".eventMonitoringState, int)   @{13 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(*docker.eventMonitoringState, int)"pbgo.weak.type.*func(*"".eventMonitoringState, int)€"runtime.zerovalue €Ptype.func(*"".eventMonitoringState, int)РPtype.func(*"".eventMonitoringState, int)€:type.*"".eventMonitoringStatetype.intþ\go.string."func(*docker.eventMonitoringState)"pf"func(*docker.eventMonitoringState) \go.string."func(*docker.eventMonitoringState)"þFtype.func(*"".eventMonitoringState)uÈv3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*docker.eventMonitoringState)"pXgo.weak.type.*func(*"".eventMonitoringState)€"runtime.zerovalue €Ftype.func(*"".eventMonitoringState)ÐFtype.func(*"".eventMonitoringState)€:type.*"".eventMonitoringStateþtgo.string."func(*docker.eventMonitoringState) sync.Locker"€~.func(*docker.eventMonitoringState) sync.Locker tgo.string."func(*docker.eventMonitoringState) sync.Locker"þ^type.func(*"".eventMonitoringState) sync.Locker  äS—3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(*docker.eventMonitoringState) sync.Locker"ppgo.weak.type.*func(*"".eventMonitoringState) sync.Locker€"runtime.zerovalue €^type.func(*"".eventMonitoringState) sync.LockerÐ^type.func(*"".eventMonitoringState) sync.Locker€:type.*"".eventMonitoringState type.sync.Lockerþœgo.string."func(*docker.eventMonitoringState, chan<- *docker.APIEvents) error"°¦Bfunc(*docker.eventMonitoringState, chan<- *docker.APIEvents) error œgo.string."func(*docker.eventMonitoringState, chan<- *docker.APIEvents) error"þ~type.func(*"".eventMonitoringState, chan<- *"".APIEvents) error°°·Uv¯3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pœgo.string."func(*docker.eventMonitoringState, chan<- *docker.APIEvents) error"pgo.weak.type.*func(*"".eventMonitoringState, chan<- *"".APIEvents) error€"runtime.zerovalue €~type.func(*"".eventMonitoringState, chan<- *"".APIEvents) errorР~type.func(*"".eventMonitoringState, chan<- *"".APIEvents) error€:type.*"".eventMonitoringState2type.chan<- *"".APIEvents type.errorþˆgo.string."func(*docker.eventMonitoringState, *docker.Client) error" ’8func(*docker.eventMonitoringState, *docker.Client) error ˆgo.string."func(*docker.eventMonitoringState, *docker.Client) error"þjtype.func(*"".eventMonitoringState, *"".Client) error°°oÍÙ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pˆgo.string."func(*docker.eventMonitoringState, *docker.Client) error"p|go.weak.type.*func(*"".eventMonitoringState, *"".Client) error€"runtime.zerovalue €jtype.func(*"".eventMonitoringState, *"".Client) errorРjtype.func(*"".eventMonitoringState, *"".Client) error€:type.*"".eventMonitoringStatetype.*"".Client type.errorþhgo.string."func(*docker.eventMonitoringState) error"€r(func(*docker.eventMonitoringState) error hgo.string."func(*docker.eventMonitoringState) error"þRtype.func(*"".eventMonitoringState) error  ñèo‚3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*docker.eventMonitoringState) error"pdgo.weak.type.*func(*"".eventMonitoringState) error€"runtime.zerovalue €Rtype.func(*"".eventMonitoringState) errorÐRtype.func(*"".eventMonitoringState) error€:type.*"".eventMonitoringStatetype.errorþfgo.string."func(*docker.eventMonitoringState) bool"pp'func(*docker.eventMonitoringState) bool fgo.string."func(*docker.eventMonitoringState) bool"þPtype.func(*"".eventMonitoringState) bool  ÆmÓù3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(*docker.eventMonitoringState) bool"pbgo.weak.type.*func(*"".eventMonitoringState) bool€"runtime.zerovalue €Ptype.func(*"".eventMonitoringState) boolÐPtype.func(*"".eventMonitoringState) bool€:type.*"".eventMonitoringStatetype.boolþ|go.string."func(*docker.eventMonitoringState, *docker.Client)"†2func(*docker.eventMonitoringState, *docker.Client) |go.string."func(*docker.eventMonitoringState, *docker.Client)"þ^type.func(*"".eventMonitoringState, *"".Client)  "8:"3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P|go.string."func(*docker.eventMonitoringState, *docker.Client)"ppgo.weak.type.*func(*"".eventMonitoringState, *"".Client)€"runtime.zerovalue €^type.func(*"".eventMonitoringState, *"".Client)Р^type.func(*"".eventMonitoringState, *"".Client)€:type.*"".eventMonitoringStatetype.*"".Clientþ‚go.string."func(*docker.eventMonitoringState, *docker.APIEvents)"Œ5func(*docker.eventMonitoringState, *docker.APIEvents) ‚go.string."func(*docker.eventMonitoringState, *docker.APIEvents)"þdtype.func(*"".eventMonitoringState, *"".APIEvents)  ð43 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P‚go.string."func(*docker.eventMonitoringState, *docker.APIEvents)"pvgo.weak.type.*func(*"".eventMonitoringState, *"".APIEvents)€"runtime.zerovalue €dtype.func(*"".eventMonitoringState, *"".APIEvents)Рdtype.func(*"".eventMonitoringState, 
*"".APIEvents)€:type.*"".eventMonitoringState$type.*"".APIEventsþgo.string."Add"0(Add go.string."Add"þ*go.string."func(int)"@4 func(int) *go.string."func(int)"þtype.func(int)„æñ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P*go.string."func(int)"p.go.weak.type.*func(int)€"runtime.zerovalue €type.func(int)Ðtype.func(int)€type.intþ go.string."Done"0*Done go.string."Done"þ$go.string."func()"0.func() $go.string."func()"þtype.func()€€ö¼‚ö3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P$go.string."func()"p(go.weak.type.*func()€"runtime.zerovalue €type.func()Ѐtype.func()þ go.string."Lock"0*Lock go.string."Lock"þ"go.string."RLock"0,RLock "go.string."RLock"þ&go.string."RLocker"00RLocker &go.string."RLocker"þcloseListeners 4go.string."closeListeners"þ8go.string."connectWithRetry"PBconnectWithRetry 8go.string."connectWithRetry"þLgo.string."func(*docker.Client) error"`Vfunc(*docker.Client) error Lgo.string."func(*docker.Client) error"þ6type.func(*"".Client) error  j ß3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(*docker.Client) error"pHgo.weak.type.*func(*"".Client) error€"runtime.zerovalue €6type.func(*"".Client) errorÐ6type.func(*"".Client) error€type.*"".Clienttype.errorþDgo.string."disableEventMonitoring"PNdisableEventMonitoring Dgo.string."disableEventMonitoring"þ0go.string."func() error"@: func() error 0go.string."func() error"þ"type.func() errorœ‚Öµ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."func() error"p4go.weak.type.*func() error€"runtime.zerovalue €"type.func() errorЀ"type.func() error€type.errorþBgo.string."enableEventMonitoring"PLenableEventMonitoring Bgo.string."enableEventMonitoring"þ*go.string."isEnabled"@4 isEnabled *go.string."isEnabled"þ.go.string."func() bool"@8 func() bool .go.string."func() bool"þ type.func() boolTËx3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."func() 
bool"p2go.weak.type.*func() bool€"runtime.zerovalue € type.func() boolЀ type.func() bool€type.boolþ2go.string."monitorEvents"@< monitorEvents 2go.string."monitorEvents"þ@go.string."func(*docker.Client)"PJfunc(*docker.Client) @go.string."func(*docker.Client)"þ*type.func(*"".Client)öÓ¹¾3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."func(*docker.Client)"premoveListener 4go.string."removeListener"þ*go.string."sendEvent"@4 sendEvent *go.string."sendEvent"þFgo.string."func(*docker.APIEvents)"PPfunc(*docker.APIEvents) Fgo.string."func(*docker.APIEvents)"þ0type.func(*"".APIEvents)¹Ñå”3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."func(*docker.APIEvents)"pBgo.weak.type.*func(*"".APIEvents)€"runtime.zerovalue €0type.func(*"".APIEvents)Ð0type.func(*"".APIEvents)€$type.*"".APIEventsþ4go.string."updateLastSeen"@>updateLastSeen 4go.string."updateLastSeen"þ:type.*"".eventMonitoringState“¦‡ô6ä   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."*docker.eventMonitoringState"pLgo.weak.type.**"".eventMonitoringState€"runtime.zerovalue8type."".eventMonitoringState` :type.*"".eventMonitoringStateÀð:type.*"".eventMonitoringStateðgo.string."Add"type.func(int) Ptype.func(*"".eventMonitoringState, int)°<"".(*eventMonitoringState).AddÀ<"".(*eventMonitoringState).AddÐ go.string."Done"ðtype.func()€Ftype.func(*"".eventMonitoringState)>"".(*eventMonitoringState).Done >"".(*eventMonitoringState).Done° go.string."Lock"Ðtype.func()àFtype.func(*"".eventMonitoringState)ð>"".(*eventMonitoringState).Lock€>"".(*eventMonitoringState).Lock"go.string."RLock"°type.func()ÀFtype.func(*"".eventMonitoringState)Ð@"".(*eventMonitoringState).RLockà@"".(*eventMonitoringState).RLockð&go.string."RLocker".type.func() sync.Locker ^type.func(*"".eventMonitoringState) 
sync.Locker°D"".(*eventMonitoringState).RLockerÀD"".(*eventMonitoringState).RLockerÐ&go.string."RUnlock"ðtype.func()€Ftype.func(*"".eventMonitoringState)D"".(*eventMonitoringState).RUnlock D"".(*eventMonitoringState).RUnlock°$go.string."Unlock"Ðtype.func()àFtype.func(*"".eventMonitoringState)ðB"".(*eventMonitoringState).Unlock€B"".(*eventMonitoringState).Unlock go.string."Wait"°type.func()ÀFtype.func(*"".eventMonitoringState)Ð>"".(*eventMonitoringState).Waità>"".(*eventMonitoringState).Waitð.go.string."addListener"€"go.importpath."".Jtype.func(chan<- *"".APIEvents) error ~type.func(*"".eventMonitoringState, chan<- *"".APIEvents) error°L"".(*eventMonitoringState).addListenerÀL"".(*eventMonitoringState).addListenerÐ4go.string."closeListeners"à"go.importpath."".ðtype.func()€ Ftype.func(*"".eventMonitoringState) R"".(*eventMonitoringState).closeListeners  R"".(*eventMonitoringState).closeListeners° 8go.string."connectWithRetry"À "go.importpath."".Ð 6type.func(*"".Client) errorà jtype.func(*"".eventMonitoringState, *"".Client) errorð V"".(*eventMonitoringState).connectWithRetry€ +V"".(*eventMonitoringState).connectWithRetry +Dgo.string."disableEventMonitoring"  +"go.importpath."".° +"type.func() errorÀ +Rtype.func(*"".eventMonitoringState) errorÐ +b"".(*eventMonitoringState).disableEventMonitoringà +b"".(*eventMonitoringState).disableEventMonitoringð +Bgo.string."enableEventMonitoring"€ "go.importpath."". 
6type.func(*"".Client) error  jtype.func(*"".eventMonitoringState, *"".Client) error° `"".(*eventMonitoringState).enableEventMonitoringÀ `"".(*eventMonitoringState).enableEventMonitoringÐ *go.string."isEnabled"à "go.importpath."".ð  type.func() bool€ Ptype.func(*"".eventMonitoringState) bool H"".(*eventMonitoringState).isEnabled  H"".(*eventMonitoringState).isEnabled° 2go.string."monitorEvents"À "go.importpath."".Ð *type.func(*"".Client)à ^type.func(*"".eventMonitoringState, *"".Client)ð P"".(*eventMonitoringState).monitorEvents€ P"".(*eventMonitoringState).monitorEvents .go.string."noListeners"  "go.importpath."".°  type.func() boolÀ Ptype.func(*"".eventMonitoringState) boolÐ L"".(*eventMonitoringState).noListenersà L"".(*eventMonitoringState).noListenersð 4go.string."removeListener"€"go.importpath."".Jtype.func(chan<- *"".APIEvents) error ~type.func(*"".eventMonitoringState, chan<- *"".APIEvents) error°R"".(*eventMonitoringState).removeListenerÀR"".(*eventMonitoringState).removeListenerÐ*go.string."sendEvent"à"go.importpath."".ð0type.func(*"".APIEvents)€dtype.func(*"".eventMonitoringState, *"".APIEvents)H"".(*eventMonitoringState).sendEvent H"".(*eventMonitoringState).sendEvent°4go.string."updateLastSeen"À"go.importpath."".Ð0type.func(*"".APIEvents)àdtype.func(*"".eventMonitoringState, *"".APIEvents)ðR"".(*eventMonitoringState).updateLastSeen€R"".(*eventMonitoringState).updateLastSeenþ(Q,3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(docker.APIVersion) bool"pLgo.weak.type.*func("".APIVersion) bool€"runtime.zerovalue €:type.func("".APIVersion) boolÐ:type.func("".APIVersion) bool€$type."".APIVersiontype.boolþ2go.string."func() string"@< func() string 2go.string."func() string"þ$type.func() string¢mË3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."func() string"p6go.weak.type.*func() string€"runtime.zerovalue €$type.func() stringЀ$type.func() 
string€type.stringþNgo.string."func(docker.APIVersion) int"`Xfunc(docker.APIVersion) int Ngo.string."func(docker.APIVersion) int"þ8type.func("".APIVersion) int  mÂq3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."func(docker.APIVersion) int"pJgo.weak.type.*func("".APIVersion) int€"runtime.zerovalue €8type.func("".APIVersion) intÐ8type.func("".APIVersion) int€$type."".APIVersiontype.intþ&type.*"".APIVersion°°{?׋6N   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptype.func("".APIVersion) string  Wø+3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."func(docker.APIVersion) string"pPgo.weak.type.*func("".APIVersion) string€"runtime.zerovalue €>type.func("".APIVersion) stringÐ>type.func("".APIVersion) string€$type."".APIVersiontype.stringþtgo.string."func(docker.APIVersion, docker.APIVersion) int"€~.func(docker.APIVersion, docker.APIVersion) int tgo.string."func(docker.APIVersion, docker.APIVersion) int"þVtype.func("".APIVersion, "".APIVersion) int°°[î@ 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(docker.APIVersion, docker.APIVersion) int"phgo.weak.type.*func("".APIVersion, "".APIVersion) int€"runtime.zerovalue €Vtype.func("".APIVersion, "".APIVersion) intРVtype.func("".APIVersion, "".APIVersion) int€$type."".APIVersion$type."".APIVersion type.intþ$type."".APIVersion°°D¨ R   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P:go.string."docker.APIVersion"p&type.*"".APIVersion€"runtime.zerovaluetype.int` $type."".APIVersion ,go.string."APIVersion"°"go.importpath."".Àð$type."".APIVersionð.go.string."GreaterThan":type.func("".APIVersion) bool Xtype.func("".APIVersion, "".APIVersion) bool°8"".(*APIVersion).GreaterThanÀ2"".APIVersion.GreaterThanÐ@go.string."GreaterThanOrEqualTo"ð:type.func("".APIVersion) bool€Xtype.func("".APIVersion, "".APIVersion) boolJ"".(*APIVersion).GreaterThanOrEqualTo 
D"".APIVersion.GreaterThanOrEqualTo°(go.string."LessThan"Ð:type.func("".APIVersion) boolàXtype.func("".APIVersion, "".APIVersion) boolð2"".(*APIVersion).LessThan€,"".APIVersion.LessThan:go.string."LessThanOrEqualTo"°:type.func("".APIVersion) boolÀXtype.func("".APIVersion, "".APIVersion) boolÐD"".(*APIVersion).LessThanOrEqualToà>"".APIVersion.LessThanOrEqualToð$go.string."String"$type.func() string >type.func("".APIVersion) string°."".(*APIVersion).StringÀ("".APIVersion.StringÐ&go.string."compare"à"go.importpath."".ð8type.func("".APIVersion) int€Vtype.func("".APIVersion, "".APIVersion) int0"".(*APIVersion).compare *"".APIVersion.compareþbruntime.gcbits.0x84888488444884440000000000000000 „ˆ„ˆDH„Dþ2go.string."docker.Client"@< docker.Client 2go.string."docker.Client"þDgo.string."SkipServerVersionCheck"PNSkipServerVersionCheck Dgo.string."SkipServerVersionCheck"þ,go.string."HTTPClient"@6 +HTTPClient ,go.string."HTTPClient"þ*go.string."TLSConfig"@4 TLSConfig *go.string."TLSConfig"þ(go.string."endpoint"@2endpoint (go.string."endpoint"þ.go.string."endpointURL"@8 endpointURL .go.string."endpointURL"þ0go.string."eventMonitor"@: eventMonitor 0go.string."eventMonitor"þ>go.string."requestedAPIVersion"PHrequestedAPIVersion >go.string."requestedAPIVersion"þ8go.string."serverAPIVersion"PBserverAPIVersion 8go.string."serverAPIVersion"þgo.string."requestedAPIVersion"°"go.importpath."".À$type."".APIVersionð8go.string."serverAPIVersion"€"go.importpath."".$type."".APIVersionÀ*docker.Client 4go.string."*docker.Client"þ€go.string."func(*docker.Client, chan<- *docker.APIEvents) error"Š4func(*docker.Client, chan<- *docker.APIEvents) error €go.string."func(*docker.Client, chan<- *docker.APIEvents) error"þbtype.func(*"".Client, chan<- *"".APIEvents) error°°fLTê3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P€go.string."func(*docker.Client, chan<- *docker.APIEvents) error"ptgo.weak.type.*func(*"".Client, chan<- *"".APIEvents) error€"runtime.zerovalue 
€btype.func(*"".Client, chan<- *"".APIEvents) errorРbtype.func(*"".Client, chan<- *"".APIEvents) error€type.*"".Client2type.chan<- *"".APIEvents type.errorþ*go.string."struct {}"@4 struct {} *go.string."struct {}"þtype.struct {}ÀÀ¬ö'™  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."struct {}"p.go.weak.type.*struct {}€"runtime.zerovalueÀtype.struct {}þ4go.string."chan struct {}"@>chan struct {} 4go.string."chan struct {}"þ&type.chan struct {}°°Så^\2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."chan struct {}"p8go.weak.type.*chan struct {}€"runtime.zerovaluetype.struct {}þRgo.typelink.chan struct {}/chan struct {}&type.chan struct {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·af3107c17ee1ab6f9f33230b5c7e3062þTgclocals·9c703c5c7b9c1932c840b69f8ebce236þTtype..hashfunc."".AttachToContainerOptionsLtype..hash."".AttachToContainerOptionsþPtype..eqfunc."".AttachToContainerOptionsHtype..eq."".AttachToContainerOptionsþJtype..alg."".AttachToContainerOptions Ttype..hashfunc."".AttachToContainerOptionsPtype..eqfunc."".AttachToContainerOptionsþXgo.string."*docker.AttachToContainerOptions"pb *docker.AttachToContainerOptions Xgo.string."*docker.AttachToContainerOptions"þBtype.*"".AttachToContainerOptions  iÉßÒ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."*docker.AttachToContainerOptions"pTgo.weak.type.**"".AttachToContainerOptions€"runtime.zerovalue@type."".AttachToContainerOptionsþbruntime.gcbits.0x488c8c8c8484c4c8c848480000000000 HŒŒŒ„„ÄÈÈHHþVgo.string."docker.AttachToContainerOptions"``docker.AttachToContainerOptions Vgo.string."docker.AttachToContainerOptions"þ*go.string."Container"@4 Container *go.string."Container"þ(go.string."qs:\"-\""0.qs:"-" (go.string."qs:\"-\""þ.go.string."InputStream"@8 InputStream .go.string."InputStream"þ0go.string."OutputStream"@: OutputStream 
0go.string."OutputStream"þ.go.string."ErrorStream"@8 ErrorStream .go.string."ErrorStream"þ go.string."Logs"0*Logs go.string."Logs"þ$go.string."Stream"0.Stream $go.string."Stream"þ"go.string."Stdin"0,Stdin "go.string."Stdin"þ$go.string."Stdout"0.Stdout $go.string."Stdout"þ$go.string."Stderr"0.Stderr $go.string."Stderr"þ&go.string."Success"00Success &go.string."Success"þ.go.string."RawTerminal"@8 RawTerminal .go.string."RawTerminal"þHgo.string."AttachToContainerOptions"`RAttachToContainerOptions Hgo.string."AttachToContainerOptions"þ@type."".AttachToContainerOptions€ € XÕh¢u  0@ABCDHPJ Jtype..alg."".AttachToContainerOptions0bruntime.gcbits.0x488c8c8c8484c4c8c848480000000000PVgo.string."docker.AttachToContainerOptions"pBtype.*"".AttachToContainerOptions€"runtime.zerovalueÀ@type."".AttachToContainerOptionsÀ*go.string."Container"àtype.stringð(go.string."qs:\"-\"".go.string."InputStream"°type.io.ReaderÀ(go.string."qs:\"-\""à0go.string."OutputStream"€type.io.Writer(go.string."qs:\"-\""°.go.string."ErrorStream"Ðtype.io.Writerà(go.string."qs:\"-\""€ go.string."Logs" type.boolÐ$go.string."Stream"ðtype.bool "go.string."Stdin"Àtype.boolð$go.string."Stdout"type.boolÀ$go.string."Stderr"àtype.bool&go.string."Success"°&type.chan struct {}à.go.string."RawTerminal"€type.bool(go.string."qs:\"-\""`°@type."".AttachToContainerOptions°Hgo.string."AttachToContainerOptions"À"go.importpath."".Ѐ @type."".AttachToContainerOptionsþŽgo.string."func(*docker.Client, docker.AttachToContainerOptions) error" ˜;func(*docker.Client, docker.AttachToContainerOptions) error Žgo.string."func(*docker.Client, docker.AttachToContainerOptions) error"þptype.func(*"".Client, "".AttachToContainerOptions) error°°¼r߉3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*docker.Client, docker.AttachToContainerOptions) error"p‚go.weak.type.*func(*"".Client, "".AttachToContainerOptions) error€"runtime.zerovalue €ptype.func(*"".Client, "".AttachToContainerOptions) 
errorРptype.func(*"".Client, "".AttachToContainerOptions) error€type.*"".Client@type."".AttachToContainerOptions type.errorþ‚go.string."func(*docker.Client, *docker.AuthConfiguration) error"Œ5func(*docker.Client, *docker.AuthConfiguration) error ‚go.string."func(*docker.Client, *docker.AuthConfiguration) error"þdtype.func(*"".Client, *"".AuthConfiguration) error°°Qžrá3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P‚go.string."func(*docker.Client, *docker.AuthConfiguration) error"pvgo.weak.type.*func(*"".Client, *"".AuthConfiguration) error€"runtime.zerovalue €dtype.func(*"".Client, *"".AuthConfiguration) errorРdtype.func(*"".Client, *"".AuthConfiguration) error€type.*"".Client4type.*"".AuthConfiguration type.errorþJgo.string."*docker.BuildImageOptions"`T*docker.BuildImageOptions Jgo.string."*docker.BuildImageOptions"þ4type.*"".BuildImageOptions  “ª¥6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."*docker.BuildImageOptions"pFgo.weak.type.**"".BuildImageOptions€"runtime.zerovalue2type."".BuildImageOptionsþbruntime.gcbits.0x48484444488c8c848484848484480000 HHDDHŒŒ„„„„„„HþHgo.string."docker.BuildImageOptions"`Rdocker.BuildImageOptions Hgo.string."docker.BuildImageOptions"þ go.string."Name"0*Name go.string."Name"þ(go.string."qs:\"t\""0.qs:"t" (go.string."qs:\"t\""þ,go.string."Dockerfile"@6 +Dockerfile ,go.string."Dockerfile"þ:go.string."qs:\"dockerfile\""@@qs:"dockerfile" :go.string."qs:\"dockerfile\""þ&go.string."NoCache"00NoCache &go.string."NoCache"þ4go.string."qs:\"nocache\""@: qs:"nocache" 4go.string."qs:\"nocache\""þ4go.string."SuppressOutput"@>SuppressOutput 4go.string."SuppressOutput"þ(go.string."qs:\"q\""0.qs:"q" (go.string."qs:\"q\""þ go.string."Pull"0*Pull go.string."Pull"þ.go.string."qs:\"pull\""@4 qs:"pull" .go.string."qs:\"pull\""þ4go.string."RmTmpContainer"@>RmTmpContainer 4go.string."RmTmpContainer"þ*go.string."qs:\"rm\""00qs:"rm" 
*go.string."qs:\"rm\""þ>go.string."ForceRmTmpContainer"PHForceRmTmpContainer >go.string."ForceRmTmpContainer"þ4go.string."qs:\"forcerm\""@: qs:"forcerm" 4go.string."qs:\"forcerm\""þ$go.string."Memory"0.Memory $go.string."Memory"þ2go.string."qs:\"memory\""@8 qs:"memory" 2go.string."qs:\"memory\""þ&go.string."Memswap"00Memswap &go.string."Memswap"þ4go.string."qs:\"memswap\""@: qs:"memswap" 4go.string."qs:\"memswap\""þ*go.string."CPUShares"@4 CPUShares *go.string."CPUShares"þ8go.string."qs:\"cpushares\""@>qs:"cpushares" 8go.string."qs:\"cpushares\""þ,go.string."CPUSetCPUs"@6 +CPUSetCPUs ,go.string."CPUSetCPUs"þ:go.string."qs:\"cpusetcpus\""@@qs:"cpusetcpus" :go.string."qs:\"cpusetcpus\""þ2go.string."RawJSONStream"@< RawJSONStream 2go.string."RawJSONStream"þ$go.string."Remote"0.Remote $go.string."Remote"þ2go.string."qs:\"remote\""@8 qs:"remote" 2go.string."qs:\"remote\""þ.go.string."AuthConfigs"@8 AuthConfigs .go.string."AuthConfigs"þ,go.string."ContextDir"@6 +ContextDir ,go.string."ContextDir"þ:go.string."BuildImageOptions"PDBuildImageOptions :go.string."BuildImageOptions"þ2type."".BuildImageOptions° ° à"& W !"#$(08@P`pxˆÈЀ à runtime.algarray0bruntime.gcbits.0x48484444488c8c848484848484480000PHgo.string."docker.BuildImageOptions"p4type.*"".BuildImageOptions€"runtime.zerovalueÀ2type."".BuildImageOptionsÀ go.string."Name"àtype.stringð(go.string."qs:\"t\"",go.string."Dockerfile"°type.stringÀ:go.string."qs:\"dockerfile\""à&go.string."NoCache"€type.bool4go.string."qs:\"nocache\""°4go.string."SuppressOutput"Ðtype.boolà(go.string."qs:\"q\""€ go.string."Pull" type.bool°.go.string."qs:\"pull\""Ð4go.string."RmTmpContainer"ðtype.bool€*go.string."qs:\"rm\"" >go.string."ForceRmTmpContainer"Àtype.boolÐ4go.string."qs:\"forcerm\""ð$go.string."Memory"type.int64 
2go.string."qs:\"memory\""À&go.string."Memswap"àtype.int64ð4go.string."qs:\"memswap\""*go.string."CPUShares"°type.int64À8go.string."qs:\"cpushares\""à,go.string."CPUSetCPUs"€type.string:go.string."qs:\"cpusetcpus\""°.go.string."InputStream"Ðtype.io.Readerà(go.string."qs:\"-\""€ 0go.string."OutputStream"  type.io.Writer° (go.string."qs:\"-\""Ð 2go.string."RawJSONStream"ð type.bool€ +(go.string."qs:\"-\""  +$go.string."Remote"À +type.stringÐ +2go.string."qs:\"remote\""ð + go.string."Auth" 2type."".AuthConfiguration  (go.string."qs:\"-\""À .go.string."AuthConfigs"à 4type."".AuthConfigurationsð (go.string."qs:\"-\"" ,go.string."ContextDir"° type.stringÀ (go.string."qs:\"-\""`à 2type."".BuildImageOptionsà :go.string."BuildImageOptions"ð "go.importpath."".€ ° 2type."".BuildImageOptionsþ€go.string."func(*docker.Client, docker.BuildImageOptions) error"Š4func(*docker.Client, docker.BuildImageOptions) error €go.string."func(*docker.Client, docker.BuildImageOptions) error"þbtype.func(*"".Client, "".BuildImageOptions) error°°`÷cÒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P€go.string."func(*docker.Client, docker.BuildImageOptions) error"ptgo.weak.type.*func(*"".Client, "".BuildImageOptions) error€"runtime.zerovalue €btype.func(*"".Client, "".BuildImageOptions) errorРbtype.func(*"".Client, "".BuildImageOptions) error€type.*"".Client2type."".BuildImageOptions type.errorþ0go.string."*docker.Port"@: *docker.Port 0go.string."*docker.Port"þ go.string."Port"0*Port go.string."Port"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þ"go.string."Proto"0,Proto "go.string."Proto"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þJgo.string."func(*docker.Port) string"`Tfunc(*docker.Port) string Jgo.string."func(*docker.Port) string"þ4type.func(*"".Port) string  b)–3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."func(*docker.Port) 
string"pFgo.weak.type.*func(*"".Port) string€"runtime.zerovalue €4type.func(*"".Port) stringÐ4type.func(*"".Port) string€type.*"".Porttype.stringþtype.*"".Port°°/hß 6$   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."*docker.Port"p,go.weak.type.**"".Port€"runtime.zerovaluetype."".Port` type.*"".PortÀðtype.*"".Portð go.string."Port"$type.func() string 4type.func(*"".Port) string°"".(*Port).PortÀ"".(*Port).PortÐ"go.string."Proto"ð$type.func() string€4type.func(*"".Port) string "".(*Port).Proto  "".(*Port).Protoþbruntime.gcbits.0x48000000000000000000000000000000 Hþ.go.string."docker.Port"@8 docker.Port .go.string."docker.Port"þHgo.string."func(docker.Port) string"`Rfunc(docker.Port) string Hgo.string."func(docker.Port) string"þ2type.func("".Port) string  ^uÇ…3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func(docker.Port) string"pDgo.weak.type.*func("".Port) string€"runtime.zerovalue €2type.func("".Port) stringÐ2type.func("".Port) string€type."".Porttype.stringþtype."".Port  •?M& À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000P.go.string."docker.Port"ptype.*"".Port€"runtime.zerovalue`type."".Port go.string."Port" "go.importpath."".°àtype."".Portà go.string."Port"€$type.func() string2type.func("".Port) string "".(*Port).Port°"".Port.PortÀ"go.string."Proto"à$type.func() stringð2type.func("".Port) string€ "".(*Port).Proto"".Port.Protoþ2go.string."[]docker.Port"@< []docker.Port 2go.string."[]docker.Port"þtype.[]"".Port  ?ä‰#   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P2go.string."[]docker.Port"p.go.weak.type.*[]"".Port€"runtime.zerovaluetype."".PortþFgo.typelink.[]docker.Port/[]"".Porttype.[]"".PortþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ2type..hashfunc.[8]"".Port*type..hash.[8]"".Portþ.type..eqfunc.[8]"".Port&type..eq.[8]"".Portþ(type..alg.[8]"".Port 2type..hashfunc.[8]"".Port.type..eqfunc.[8]"".Portþ4go.string."[8]docker.Port"@>[8]docker.Port 4go.string."[8]docker.Port"þtype.[8]"".PortÀÀ€~/(ã (type..alg.[8]"".Port0bruntime.gcbits.0x48484848484848480000000000000000P4go.string."[8]docker.Port"p0go.weak.type.*[8]"".Port€"runtime.zerovaluetype."".Port type.[]"".PortþJgo.typelink.[8]docker.Port/[8]"".Porttype.[8]"".Portþ.go.string."[]struct {}"@8 []struct {} .go.string."[]struct {}"þ type.[]struct {}  ºÌ¥…   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P.go.string."[]struct {}"p2go.weak.type.*[]struct {}€"runtime.zerovaluetype.struct {}þFgo.typelink.[]struct {}/[]struct {} type.[]struct {}þ0go.string."[8]struct {}"@: [8]struct {} 0go.string."[8]struct {}"þ"type.[8]struct {}ÀÀ>ƒy ‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P0go.string."[8]struct {}"p4go.weak.type.*[8]struct {}€"runtime.zerovaluetype.struct {}  type.[]struct {}þJgo.typelink.[8]struct {}/[8]struct {}"type.[8]struct {}þZgo.string."*map.bucket[docker.Port]struct {}"pd!*map.bucket[docker.Port]struct {} Zgo.string."*map.bucket[docker.Port]struct {}"þDtype.*map.bucket["".Port]struct {}  1S6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."*map.bucket[docker.Port]struct {}"pVgo.weak.type.**map.bucket["".Port]struct {}€"runtime.zerovalueBtype.map.bucket["".Port]struct {}þbruntime.gcbits.0x84848484848484848400000000000000 „„„„„„„„„þXgo.string."map.bucket[docker.Port]struct {}"pb 
map.bucket[docker.Port]struct {} Xgo.string."map.bucket[docker.Port]struct {}"þBtype.map.bucket["".Port]struct {}°°±(눈 à runtime.algarray0bruntime.gcbits.0x84848484848484848400000000000000PXgo.string."map.bucket[docker.Port]struct {}"pTgo.weak.type.*map.bucket["".Port]struct {}€"runtime.zerovalueÀBtype.map.bucket["".Port]struct {}À go.string."keys"àtype.[8]"".Port$go.string."values"°"type.[8]struct {}à(go.string."overflow"€Dtype.*map.bucket["".Port]struct {}þRgo.string."map.hdr[docker.Port]struct {}"`\map.hdr[docker.Port]struct {} Rgo.string."map.hdr[docker.Port]struct {}"þYˆ à runtime.algarray0Btype..gc.map.bucket[string]string@Jtype..gcprog.map.bucket[string]stringPHgo.string."map.bucket[string]string"pLgo.weak.type.*map.bucket[string]string€"runtime.zerovalueÀ:type.map.bucket[string]stringÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]stringà(go.string."overflow"€go.weak.type.*map[string]string€"runtime.zerovaluetype.string type.string°:type.map.bucket[string]stringÀ4type.map.hdr[string]stringþ^go.typelink.map[string]string/map[string]string,type.map[string]stringþ,$type..gc."".Config0þ,type..gcprog."".Config.fVY–Y–eš™•e þ2go.string."docker.Config"@< docker.Config 2go.string."docker.Config"þ(go.string."Hostname"@2Hostname (go.string."Hostname"þ†go.string."json:\"Hostname,omitempty\" yaml:\"Hostname,omitempty\""ˆ3json:"Hostname,omitempty" yaml:"Hostname,omitempty" †go.string."json:\"Hostname,omitempty\" yaml:\"Hostname,omitempty\""þ,go.string."Domainname"@6 +Domainname ,go.string."Domainname"þŽgo.string."json:\"Domainname,omitempty\" yaml:\"Domainname,omitempty\""7json:"Domainname,omitempty" yaml:"Domainname,omitempty" Žgo.string."json:\"Domainname,omitempty\" yaml:\"Domainname,omitempty\""þ go.string."User"0*User go.string."User"þvgo.string."json:\"User,omitempty\" yaml:\"User,omitempty\""€x+json:"User,omitempty" yaml:"User,omitempty" vgo.string."json:\"User,omitempty\" yaml:\"User,omitempty\""þ~go.string."json:\"Memory,omitempty\" 
yaml:\"Memory,omitempty\""€€/json:"Memory,omitempty" yaml:"Memory,omitempty" ~go.string."json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""þ,go.string."MemorySwap"@6 +MemorySwap ,go.string."MemorySwap"þŽgo.string."json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""7json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" Žgo.string."json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""þŠgo.string."json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""Œ5json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" Šgo.string."json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""þ$go.string."CPUSet"0.CPUSet $go.string."CPUSet"þ~go.string."json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""€€/json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" ~go.string."json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""þ.go.string."AttachStdin"@8 AttachStdin .go.string."AttachStdin"þ’go.string."json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\"" ”9json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" ’go.string."json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""þ0go.string."AttachStdout"@: AttachStdout 0go.string."AttachStdout"þ–go.string."json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\"" ˜;json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" –go.string."json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""þ0go.string."AttachStderr"@: AttachStderr 0go.string."AttachStderr"þ–go.string."json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\"" ˜;json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" –go.string."json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""þ*go.string."PortSpecs"@4 PortSpecs *go.string."PortSpecs"þŠgo.string."json:\"PortSpecs,omitempty\" yaml:\"PortSpecs,omitempty\""Œ5json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty" Šgo.string."json:\"PortSpecs,omitempty\" yaml:\"PortSpecs,omitempty\""þ0go.string."ExposedPorts"@: ExposedPorts 
0go.string."ExposedPorts"þ–go.string."json:\"ExposedPorts,omitempty\" yaml:\"ExposedPorts,omitempty\"" ˜;json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty" –go.string."json:\"ExposedPorts,omitempty\" yaml:\"ExposedPorts,omitempty\""þgo.string."Tty"0(Tty go.string."Tty"þrgo.string."json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""€t)json:"Tty,omitempty" yaml:"Tty,omitempty" rgo.string."json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""þ*go.string."OpenStdin"@4 OpenStdin *go.string."OpenStdin"þŠgo.string."json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""Œ5json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" Šgo.string."json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""þ*go.string."StdinOnce"@4 StdinOnce *go.string."StdinOnce"þŠgo.string."json:\"StdinOnce,omitempty\" yaml:\"StdinOnce,omitempty\""Œ5json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty" Šgo.string."json:\"StdinOnce,omitempty\" yaml:\"StdinOnce,omitempty\""þgo.string."Env"0(Env go.string."Env"þrgo.string."json:\"Env,omitempty\" yaml:\"Env,omitempty\""€t)json:"Env,omitempty" yaml:"Env,omitempty" rgo.string."json:\"Env,omitempty\" yaml:\"Env,omitempty\""þgo.string."Cmd"0(Cmd go.string."Cmd"þJgo.string."json:\"Cmd\" yaml:\"Cmd\""PLjson:"Cmd" yaml:"Cmd" Jgo.string."json:\"Cmd\" yaml:\"Cmd\""þgo.string."DNS"0(DNS go.string."DNS"þrgo.string."json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""€t)json:"Dns,omitempty" yaml:"Dns,omitempty" rgo.string."json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""þ"go.string."Image"0,Image "go.string."Image"þzgo.string."json:\"Image,omitempty\" yaml:\"Image,omitempty\""€|-json:"Image,omitempty" yaml:"Image,omitempty" zgo.string."json:\"Image,omitempty\" yaml:\"Image,omitempty\""þ&go.string."Volumes"00Volumes &go.string."Volumes"þ‚go.string."json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""„1json:"Volumes,omitempty" yaml:"Volumes,omitempty" ‚go.string."json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""þ.go.string."VolumesFrom"@8 VolumesFrom 
.go.string."VolumesFrom"þ’go.string."json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\"" ”9json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" ’go.string."json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""þ,go.string."WorkingDir"@6 +WorkingDir ,go.string."WorkingDir"þŽgo.string."json:\"WorkingDir,omitempty\" yaml:\"WorkingDir,omitempty\""7json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" Žgo.string."json:\"WorkingDir,omitempty\" yaml:\"WorkingDir,omitempty\""þ,go.string."MacAddress"@6 +MacAddress ,go.string."MacAddress"þŽgo.string."json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""7json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" Žgo.string."json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""þ,go.string."Entrypoint"@6 +Entrypoint ,go.string."Entrypoint"þfgo.string."json:\"Entrypoint\" yaml:\"Entrypoint\""ph#json:"Entrypoint" yaml:"Entrypoint" fgo.string."json:\"Entrypoint\" yaml:\"Entrypoint\""þ6go.string."NetworkDisabled"@@NetworkDisabled 6go.string."NetworkDisabled"þ¢go.string."json:\"NetworkDisabled,omitempty\" yaml:\"NetworkDisabled,omitempty\""°¤Ajson:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty" ¢go.string."json:\"NetworkDisabled,omitempty\" yaml:\"NetworkDisabled,omitempty\""þ0go.string."SecurityOpts"@: SecurityOpts 0go.string."SecurityOpts"þ–go.string."json:\"SecurityOpts,omitempty\" yaml:\"SecurityOpts,omitempty\"" ˜;json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty" –go.string."json:\"SecurityOpts,omitempty\" yaml:\"SecurityOpts,omitempty\""þ&go.string."OnBuild"00OnBuild &go.string."OnBuild"þ‚go.string."json:\"OnBuild,omitempty\" yaml:\"OnBuild,omitempty\""„1json:"OnBuild,omitempty" yaml:"OnBuild,omitempty" ‚go.string."json:\"OnBuild,omitempty\" yaml:\"OnBuild,omitempty\""þ$go.string."Labels"0.Labels $go.string."Labels"þ~go.string."json:\"Labels,omitempty\" yaml:\"Labels,omitempty\""€€/json:"Labels,omitempty" yaml:"Labels,omitempty" ~go.string."json:\"Labels,omitempty\" 
yaml:\"Labels,omitempty\""þ$go.string."Config"0.Config $go.string."Config"þtype."".ConfigÐÐpòa0zY 08@HXYZ`x€‚ˆ ¸Ðàèø08Ph¾ à runtime.algarray0$type..gc."".Config@,type..gcprog."".ConfigP2go.string."docker.Config"ptype.*"".Config€"runtime.zerovalueÀtype."".ConfigÀ(go.string."Hostname"àtype.stringð†go.string."json:\"Hostname,omitempty\" yaml:\"Hostname,omitempty\"",go.string."Domainname"°type.stringÀŽgo.string."json:\"Domainname,omitempty\" yaml:\"Domainname,omitempty\""à go.string."User"€type.stringvgo.string."json:\"User,omitempty\" yaml:\"User,omitempty\""°$go.string."Memory"Ðtype.int64à~go.string."json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""€,go.string."MemorySwap" type.int64°Žgo.string."json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""Ð*go.string."CPUShares"ðtype.int64€Šgo.string."json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\"" $go.string."CPUSet"Àtype.stringÐ~go.string."json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""ð.go.string."AttachStdin"type.bool ’go.string."json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""À0go.string."AttachStdout"àtype.boolð–go.string."json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""0go.string."AttachStderr"°type.boolÀ–go.string."json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""à*go.string."PortSpecs"€type.[]stringŠgo.string."json:\"PortSpecs,omitempty\" yaml:\"PortSpecs,omitempty\""°0go.string."ExposedPorts"Ð4type.map["".Port]struct {}à–go.string."json:\"ExposedPorts,omitempty\" yaml:\"ExposedPorts,omitempty\""€ go.string."Tty"  type.bool° rgo.string."json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""Ð *go.string."OpenStdin"ð type.bool€ +Šgo.string."json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""  +*go.string."StdinOnce"À +type.boolÐ +Šgo.string."json:\"StdinOnce,omitempty\" yaml:\"StdinOnce,omitempty\""ð +go.string."Env" type.[]string  rgo.string."json:\"Env,omitempty\" yaml:\"Env,omitempty\""À go.string."Cmd"à type.[]stringð Jgo.string."json:\"Cmd\" 
yaml:\"Cmd\"" go.string."DNS"° type.[]stringÀ rgo.string."json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""à "go.string."Image"€ type.string zgo.string."json:\"Image,omitempty\" yaml:\"Image,omitempty\""° &go.string."Volumes"Ð 2type.map[string]struct {}à ‚go.string."json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""€.go.string."VolumesFrom" type.string°’go.string."json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""Ð,go.string."WorkingDir"ðtype.string€Žgo.string."json:\"WorkingDir,omitempty\" yaml:\"WorkingDir,omitempty\"" ,go.string."MacAddress"Àtype.stringÐŽgo.string."json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""ð,go.string."Entrypoint"type.[]string fgo.string."json:\"Entrypoint\" yaml:\"Entrypoint\""À6go.string."NetworkDisabled"àtype.boolð¢go.string."json:\"NetworkDisabled,omitempty\" yaml:\"NetworkDisabled,omitempty\""0go.string."SecurityOpts"°type.[]stringÀ–go.string."json:\"SecurityOpts,omitempty\" yaml:\"SecurityOpts,omitempty\""à&go.string."OnBuild"€type.[]string‚go.string."json:\"OnBuild,omitempty\" yaml:\"OnBuild,omitempty\""°$go.string."Labels"Ð,type.map[string]stringà~go.string."json:\"Labels,omitempty\" yaml:\"Labels,omitempty\""`€type."".Config€$go.string."Config""go.importpath."". 
Ðtype."".Configþ4go.string."*docker.Config"@>*docker.Config 4go.string."*docker.Config"þtype.*"".Config  š*¤6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*docker.Config"p0go.weak.type.**"".Config€"runtime.zerovaluetype."".ConfigþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þPtype..hashfunc."".CommitContainerOptionsHtype..hash."".CommitContainerOptionsþLtype..eqfunc."".CommitContainerOptionsDtype..eq."".CommitContainerOptionsþFtype..alg."".CommitContainerOptions Ptype..hashfunc."".CommitContainerOptionsLtype..eqfunc."".CommitContainerOptionsþTgo.string."*docker.CommitContainerOptions"`^*docker.CommitContainerOptions Tgo.string."*docker.CommitContainerOptions"þ>type.*"".CommitContainerOptions  ÃJn6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*docker.CommitContainerOptions"pPgo.weak.type.**"".CommitContainerOptions€"runtime.zerovaluetype.*"".CommitContainerOptions€"runtime.zerovalueÀ*docker.Change 4go.string."*docker.Change"þNgo.string."func(*docker.Change) string"`Xfunc(*docker.Change) string Ngo.string."func(*docker.Change) string"þ8type.func(*"".Change) string  .+eÒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."func(*docker.Change) string"pJgo.weak.type.*func(*"".Change) string€"runtime.zerovalue €8type.func(*"".Change) stringÐ8type.func(*"".Change) string€type.*"".Changetype.stringþtype.*"".ChangeÐÐ~IB“6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*docker.Change"p0go.weak.type.**"".Change€"runtime.zerovaluetype."".Change` type.*"".ChangeÀðtype.*"".Changeð$go.string."String"$type.func() string 8type.func(*"".Change) string°&"".(*Change).StringÀ&"".(*Change).Stringþ2go.string."docker.Change"@< docker.Change 2go.string."docker.Change"þ go.string."Path"0*Path go.string."Path"þ 
go.string."Kind"0*Kind go.string."Kind"þ$go.string."Change"0.Change $go.string."Change"þtype."".Change°°üë·² &type..alg."".Change0bruntime.gcbits.0x48844400000000000000000000000000P2go.string."docker.Change"ptype.*"".Change€"runtime.zerovalueÀtype."".ChangeÀ go.string."Path"àtype.string go.string."Kind"°$type."".ChangeType`àtype."".Changeà$go.string."Change"ð"go.importpath."".€°type."".Changeþ6go.string."[]docker.Change"@@[]docker.Change 6go.string."[]docker.Change"þ type.[]"".Change  f~|;   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P6go.string."[]docker.Change"p2go.weak.type.*[]"".Change€"runtime.zerovaluetype."".ChangeþNgo.typelink.[]docker.Change/[]"".Change type.[]"".Changeþ‚go.string."func(*docker.Client, string) ([]docker.Change, error)"Œ5func(*docker.Client, string) ([]docker.Change, error) ‚go.string."func(*docker.Client, string) ([]docker.Change, error)"þdtype.func(*"".Client, string) ([]"".Change, error)ÀÀˆæ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P‚go.string."func(*docker.Client, string) ([]docker.Change, error)"pvgo.weak.type.*func(*"".Client, string) ([]"".Change, error)€"runtime.zerovalue €dtype.func(*"".Client, string) ([]"".Change, error)Рdtype.func(*"".Client, string) ([]"".Change, error)€type.*"".Clienttype.string  type.[]"".Change°type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·e13351f28add7c60853cb3aac0a0e34eþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þTtype..hashfunc."".CopyFromContainerOptionsLtype..hash."".CopyFromContainerOptionsþPtype..eqfunc."".CopyFromContainerOptionsHtype..eq."".CopyFromContainerOptionsþJtype..alg."".CopyFromContainerOptions Ttype..hashfunc."".CopyFromContainerOptionsPtype..eqfunc."".CopyFromContainerOptionsþXgo.string."*docker.CopyFromContainerOptions"pb *docker.CopyFromContainerOptions Xgo.string."*docker.CopyFromContainerOptions"þBtype.*"".CopyFromContainerOptions  û¯®26   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."*docker.CopyFromContainerOptions"pTgo.weak.type.**"".CopyFromContainerOptions€"runtime.zerovalue@type."".CopyFromContainerOptionsþbruntime.gcbits.0x8c484800000000000000000000000000 ŒHHþVgo.string."docker.CopyFromContainerOptions"``docker.CopyFromContainerOptions Vgo.string."docker.CopyFromContainerOptions"þ,go.string."json:\"-\""@2json:"-" ,go.string."json:\"-\""þ(go.string."Resource"@2Resource (go.string."Resource"þHgo.string."CopyFromContainerOptions"`RCopyFromContainerOptions Hgo.string."CopyFromContainerOptions"þ@type."".CopyFromContainerOptions€€0 ˆ¸ $ Jtype..alg."".CopyFromContainerOptions0bruntime.gcbits.0x8c484800000000000000000000000000PVgo.string."docker.CopyFromContainerOptions"pBtype.*"".CopyFromContainerOptions€"runtime.zerovalueÀ@type."".CopyFromContainerOptionsÀ0go.string."OutputStream"àtype.io.Writerð,go.string."json:\"-\""*go.string."Container"°type.stringÀ,go.string."json:\"-\""à(go.string."Resource"€type.string`°@type."".CopyFromContainerOptions°Hgo.string."CopyFromContainerOptions"À"go.importpath."".Ѐ@type."".CopyFromContainerOptionsþŽgo.string."func(*docker.Client, docker.CopyFromContainerOptions) error" ˜;func(*docker.Client, docker.CopyFromContainerOptions) error Žgo.string."func(*docker.Client, docker.CopyFromContainerOptions) error"þptype.func(*"".Client, "".CopyFromContainerOptions) error°°äÃÞ­3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*docker.Client, docker.CopyFromContainerOptions) error"p‚go.weak.type.*func(*"".Client, "".CopyFromContainerOptions) error€"runtime.zerovalue €ptype.func(*"".Client, "".CopyFromContainerOptions) errorРptype.func(*"".Client, "".CopyFromContainerOptions) error€type.*"".Client@type."".CopyFromContainerOptions 
type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.string."docker.KeyValuePair"PHdocker.KeyValuePair >go.string."docker.KeyValuePair"þgo.string."Key"0(Key go.string."Key"þrgo.string."json:\"Key,omitempty\" yaml:\"Key,omitempty\""€t)json:"Key,omitempty" yaml:"Key,omitempty" rgo.string."json:\"Key,omitempty\" yaml:\"Key,omitempty\""þ"go.string."Value"0,Value "go.string."Value"þzgo.string."json:\"Value,omitempty\" yaml:\"Value,omitempty\""€|-json:"Value,omitempty" yaml:"Value,omitempty" zgo.string."json:\"Value,omitempty\" yaml:\"Value,omitempty\""þ0go.string."KeyValuePair"@: KeyValuePair 0go.string."KeyValuePair"þ(type."".KeyValuePair°° É¦× 2type..alg."".KeyValuePair0bruntime.gcbits.0x48480000000000000000000000000000P>go.string."docker.KeyValuePair"p*type.*"".KeyValuePair€"runtime.zerovalueÀ(type."".KeyValuePairÀgo.string."Key"àtype.stringðrgo.string."json:\"Key,omitempty\" yaml:\"Key,omitempty\"""go.string."Value"°type.stringÀzgo.string."json:\"Value,omitempty\" yaml:\"Value,omitempty\""`à(type."".KeyValuePairà0go.string."KeyValuePair"ð"go.importpath."".€°(type."".KeyValuePairþBgo.string."[]docker.KeyValuePair"PL[]docker.KeyValuePair Bgo.string."[]docker.KeyValuePair"þ,type.[]"".KeyValuePair  _9ìä   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PBgo.string."[]docker.KeyValuePair"p>go.weak.type.*[]"".KeyValuePair€"runtime.zerovalue(type."".KeyValuePairþfgo.typelink.[]docker.KeyValuePair/[]"".KeyValuePair,type.[]"".KeyValuePairþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ:type..hashfunc."".PortBinding2type..hash."".PortBindingþ6type..eqfunc."".PortBinding.type..eq."".PortBindingþ0type..alg."".PortBinding 
:type..hashfunc."".PortBinding6type..eqfunc."".PortBindingþ>go.string."*docker.PortBinding"PH*docker.PortBinding >go.string."*docker.PortBinding"þ(type.*"".PortBinding  >!è6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.PortBinding"p:go.weak.type.**"".PortBinding€"runtime.zerovalue&type."".PortBindingþtype..hashfunc."".RestartPolicy6type..hash."".RestartPolicyþ:type..eqfunc."".RestartPolicy2type..eq."".RestartPolicyþ4type..alg."".RestartPolicy >type..hashfunc."".RestartPolicy:type..eqfunc."".RestartPolicyþBgo.string."*docker.RestartPolicy"PL*docker.RestartPolicy Bgo.string."*docker.RestartPolicy"þ,type.*"".RestartPolicy  Û°”J6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*docker.RestartPolicy"p>go.weak.type.**"".RestartPolicy€"runtime.zerovalue*type."".RestartPolicyþ@go.string."docker.RestartPolicy"PJdocker.RestartPolicy @go.string."docker.RestartPolicy"þvgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\""€x+json:"Name,omitempty" yaml:"Name,omitempty" vgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\""þ:go.string."MaximumRetryCount"PDMaximumRetryCount :go.string."MaximumRetryCount"þªgo.string."json:\"MaximumRetryCount,omitempty\" yaml:\"MaximumRetryCount,omitempty\""°¬Ejson:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" ªgo.string."json:\"MaximumRetryCount,omitempty\" yaml:\"MaximumRetryCount,omitempty\""þ2go.string."RestartPolicy"@< RestartPolicy 2go.string."RestartPolicy"þ*type."".RestartPolicy°°ÿ)6à 4type..alg."".RestartPolicy0bruntime.gcbits.0x48844400000000000000000000000000P@go.string."docker.RestartPolicy"p,type.*"".RestartPolicy€"runtime.zerovalueÀ*type."".RestartPolicyÀ go.string."Name"àtype.stringðvgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\"":go.string."MaximumRetryCount"°type.intÀªgo.string."json:\"MaximumRetryCount,omitempty\" 
yaml:\"MaximumRetryCount,omitempty\""`à*type."".RestartPolicyà2go.string."RestartPolicy"ð"go.importpath."".€°*type."".RestartPolicyþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc."".Device(type..hash."".Deviceþ,type..eqfunc."".Device$type..eq."".Deviceþ&type..alg."".Device 0type..hashfunc."".Device,type..eqfunc."".Deviceþ4go.string."*docker.Device"@>*docker.Device 4go.string."*docker.Device"þtype.*"".Device  un¡|6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*docker.Device"p0go.weak.type.**"".Device€"runtime.zerovaluetype."".Deviceþ2go.string."docker.Device"@< docker.Device 2go.string."docker.Device"þ,go.string."PathOnHost"@6 +PathOnHost ,go.string."PathOnHost"þŽgo.string."json:\"PathOnHost,omitempty\" yaml:\"PathOnHost,omitempty\""7json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty" Žgo.string."json:\"PathOnHost,omitempty\" yaml:\"PathOnHost,omitempty\""þ6go.string."PathInContainer"@@PathInContainer 6go.string."PathInContainer"þ¢go.string."json:\"PathInContainer,omitempty\" yaml:\"PathInContainer,omitempty\""°¤Ajson:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty" ¢go.string."json:\"PathInContainer,omitempty\" yaml:\"PathInContainer,omitempty\""þ:go.string."CgroupPermissions"PDCgroupPermissions :go.string."CgroupPermissions"þªgo.string."json:\"CgroupPermissions,omitempty\" yaml:\"CgroupPermissions,omitempty\""°¬Ejson:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty" ªgo.string."json:\"CgroupPermissions,omitempty\" yaml:\"CgroupPermissions,omitempty\""þ$go.string."Device"0.Device $go.string."Device"þtype."".Device€€0 žøþ & &type..alg."".Device0bruntime.gcbits.0x48484800000000000000000000000000P2go.string."docker.Device"ptype.*"".Device€"runtime.zerovalueÀtype."".DeviceÀ,go.string."PathOnHost"àtype.stringðŽgo.string."json:\"PathOnHost,omitempty\" 
yaml:\"PathOnHost,omitempty\""6go.string."PathInContainer"°type.stringÀ¢go.string."json:\"PathInContainer,omitempty\" yaml:\"PathInContainer,omitempty\""à:go.string."CgroupPermissions"€type.stringªgo.string."json:\"CgroupPermissions,omitempty\" yaml:\"CgroupPermissions,omitempty\""`°type."".Device°$go.string."Device"À"go.importpath."".Ѐtype."".Deviceþ6go.string."[]docker.Device"@@[]docker.Device 6go.string."[]docker.Device"þ type.[]"".Device  jjW£   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P6go.string."[]docker.Device"p2go.weak.type.*[]"".Device€"runtime.zerovaluetype."".DeviceþNgo.typelink.[]docker.Device/[]"".Device type.[]"".Deviceþ:go.string."*docker.LogConfig"PD*docker.LogConfig :go.string."*docker.LogConfig"þ$type.*"".LogConfig  ²(²Ï6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.LogConfig"p6go.weak.type.**"".LogConfig€"runtime.zerovalue"type."".LogConfigþbruntime.gcbits.0x48888400000000000000000000000000 Hˆ„þ8go.string."docker.LogConfig"PBdocker.LogConfig 8go.string."docker.LogConfig"þ go.string."Type"0*Type go.string."Type"þvgo.string."json:\"Type,omitempty\" yaml:\"Type,omitempty\""€x+json:"Type,omitempty" yaml:"Type,omitempty" vgo.string."json:\"Type,omitempty\" yaml:\"Type,omitempty\""þ*go.string."LogConfig"@4 LogConfig *go.string."LogConfig"þ"type."".LogConfig°°½zè à runtime.algarray0bruntime.gcbits.0x48888400000000000000000000000000P8go.string."docker.LogConfig"p$type.*"".LogConfig€"runtime.zerovalueÀ"type."".LogConfigÀ go.string."Type"àtype.stringðvgo.string."json:\"Type,omitempty\" yaml:\"Type,omitempty\""$go.string."Config"°,type.map[string]stringÀ~go.string."json:\"Config,omitempty\" 
yaml:\"Config,omitempty\""`à"type."".LogConfigà*go.string."LogConfig"ð"go.importpath."".€°"type."".LogConfigþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc."".ULimit(type..hash."".ULimitþ,type..eqfunc."".ULimit$type..eq."".ULimitþ&type..alg."".ULimit 0type..hashfunc."".ULimit,type..eqfunc."".ULimitþ4go.string."*docker.ULimit"@>*docker.ULimit 4go.string."*docker.ULimit"þtype.*"".ULimit  Ö‡Á<6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*docker.ULimit"p0go.weak.type.**"".ULimit€"runtime.zerovaluetype."".ULimitþbruntime.gcbits.0x48440000000000000000000000000000 HDþ2go.string."docker.ULimit"@< docker.ULimit 2go.string."docker.ULimit"þ go.string."Soft"0*Soft go.string."Soft"þvgo.string."json:\"Soft,omitempty\" yaml:\"Soft,omitempty\""€x+json:"Soft,omitempty" yaml:"Soft,omitempty" vgo.string."json:\"Soft,omitempty\" yaml:\"Soft,omitempty\""þ go.string."Hard"0*Hard go.string."Hard"þvgo.string."json:\"Hard,omitempty\" yaml:\"Hard,omitempty\""€x+json:"Hard,omitempty" yaml:"Hard,omitempty" vgo.string."json:\"Hard,omitempty\" yaml:\"Hard,omitempty\""þ$go.string."ULimit"0.ULimit $go.string."ULimit"þtype."".ULimit€€ Ú²G& &type..alg."".ULimit0bruntime.gcbits.0x48440000000000000000000000000000P2go.string."docker.ULimit"ptype.*"".ULimit€"runtime.zerovalueÀtype."".ULimitÀ go.string."Name"àtype.stringðvgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\"" go.string."Soft"°type.int64Àvgo.string."json:\"Soft,omitempty\" yaml:\"Soft,omitempty\""à go.string."Hard"€type.int64vgo.string."json:\"Hard,omitempty\" yaml:\"Hard,omitempty\""`°type."".ULimit°$go.string."ULimit"À"go.importpath."".Ѐtype."".ULimitþ6go.string."[]docker.ULimit"@@[]docker.ULimit 6go.string."[]docker.ULimit"þ type.[]"".ULimit  ReadonlyRootfs 4go.string."ReadonlyRootfs"þžgo.string."json:\"ReadonlyRootfs,omitempty\" 
yaml:\"ReadonlyRootfs,omitempty\""  ?json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" žgo.string."json:\"ReadonlyRootfs,omitempty\" yaml:\"ReadonlyRootfs,omitempty\""þ.go.string."SecurityOpt"@8 SecurityOpt .go.string."SecurityOpt"þ’go.string."json:\"SecurityOpt,omitempty\" yaml:\"SecurityOpt,omitempty\"" ”9json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" ’go.string."json:\"SecurityOpt,omitempty\" yaml:\"SecurityOpt,omitempty\""þ0go.string."CgroupParent"@: CgroupParent 0go.string."CgroupParent"þ–go.string."json:\"CgroupParent,omitempty\" yaml:\"CgroupParent,omitempty\"" ˜;json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" –go.string."json:\"CgroupParent,omitempty\" yaml:\"CgroupParent,omitempty\""þ(go.string."CPUQuota"@2CPUQuota (go.string."CPUQuota"þ†go.string."json:\"CpuQuota,omitempty\" yaml:\"CpuQuota,omitempty\""ˆ3json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty" †go.string."json:\"CpuQuota,omitempty\" yaml:\"CpuQuota,omitempty\""þ*go.string."CPUPeriod"@4 CPUPeriod *go.string."CPUPeriod"þŠgo.string."json:\"CpuPeriod,omitempty\" yaml:\"CpuPeriod,omitempty\""Œ5json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty" Šgo.string."json:\"CpuPeriod,omitempty\" yaml:\"CpuPeriod,omitempty\""þ&go.string."Ulimits"00Ulimits &go.string."Ulimits"þ‚go.string."json:\"Ulimits,omitempty\" yaml:\"Ulimits,omitempty\""„1json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" ‚go.string."json:\"Ulimits,omitempty\" yaml:\"Ulimits,omitempty\""þ,go.string."HostConfig"@6 +HostConfig ,go.string."HostConfig"þ$type."".HostConfigðð¶ì,Y0HXpx€˜ ¸Ðè 0@Xpˆ¨¸ÀÈÐàèðÈ à runtime.algarray@4type..gcprog."".HostConfigP:go.string."docker.HostConfig"p&type.*"".HostConfig€"runtime.zerovalueÀ$type."".HostConfigÀ"go.string."Binds"àtype.[]stringðzgo.string."json:\"Binds,omitempty\" yaml:\"Binds,omitempty\""$go.string."CapAdd"°type.[]stringÀ~go.string."json:\"CapAdd,omitempty\" yaml:\"CapAdd,omitempty\""à&go.string."CapDrop"€type.[]string‚go.string."json:\"CapDrop,omitempty\" 
yaml:\"CapDrop,omitempty\""°6go.string."ContainerIDFile"Ðtype.stringà¢go.string."json:\"ContainerIDFile,omitempty\" yaml:\"ContainerIDFile,omitempty\""€&go.string."LxcConf" ,type.[]"".KeyValuePair°‚go.string."json:\"LxcConf,omitempty\" yaml:\"LxcConf,omitempty\""Ð,go.string."Privileged"ðtype.bool€Žgo.string."json:\"Privileged,omitempty\" yaml:\"Privileged,omitempty\"" 0go.string."PortBindings"ÀBtype.map["".Port][]"".PortBindingЖgo.string."json:\"PortBindings,omitempty\" yaml:\"PortBindings,omitempty\""ð"go.string."Links"type.[]string zgo.string."json:\"Links,omitempty\" yaml:\"Links,omitempty\""À6go.string."PublishAllPorts"àtype.boolð¢go.string."json:\"PublishAllPorts,omitempty\" yaml:\"PublishAllPorts,omitempty\""go.string."DNS"°type.[]stringÀrgo.string."json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""à*go.string."DNSSearch"€type.[]stringŠgo.string."json:\"DnsSearch,omitempty\" yaml:\"DnsSearch,omitempty\""°,go.string."ExtraHosts"Ðtype.[]stringàŽgo.string."json:\"ExtraHosts,omitempty\" yaml:\"ExtraHosts,omitempty\""€ .go.string."VolumesFrom"  type.[]string° ’go.string."json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""Ð .go.string."NetworkMode"ð type.string€ +’go.string."json:\"NetworkMode,omitempty\" yaml:\"NetworkMode,omitempty\""  +&go.string."IpcMode"À +type.stringÐ +‚go.string."json:\"IpcMode,omitempty\" yaml:\"IpcMode,omitempty\""ð +&go.string."PidMode" type.string  ‚go.string."json:\"PidMode,omitempty\" yaml:\"PidMode,omitempty\""À &go.string."UTSMode"à type.stringð ‚go.string."json:\"UTSMode,omitempty\" yaml:\"UTSMode,omitempty\"" 2go.string."RestartPolicy"° *type."".RestartPolicyÀ šgo.string."json:\"RestartPolicy,omitempty\" yaml:\"RestartPolicy,omitempty\""à &go.string."Devices"€  type.[]"".Device ‚go.string."json:\"Devices,omitempty\" yaml:\"Devices,omitempty\""° *go.string."LogConfig"Ð "type."".LogConfigà Šgo.string."json:\"LogConfig,omitempty\" yaml:\"LogConfig,omitempty\""€4go.string."ReadonlyRootfs" 
type.bool°žgo.string."json:\"ReadonlyRootfs,omitempty\" yaml:\"ReadonlyRootfs,omitempty\""Ð.go.string."SecurityOpt"ðtype.[]string€’go.string."json:\"SecurityOpt,omitempty\" yaml:\"SecurityOpt,omitempty\"" 0go.string."CgroupParent"Àtype.stringЖgo.string."json:\"CgroupParent,omitempty\" yaml:\"CgroupParent,omitempty\""ð$go.string."Memory"type.int64 ~go.string."json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""À,go.string."MemorySwap"àtype.int64ðŽgo.string."json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""*go.string."CPUShares"°type.int64ÀŠgo.string."json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""à$go.string."CPUSet"€type.string~go.string."json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""°(go.string."CPUQuota"Ðtype.int64à†go.string."json:\"CpuQuota,omitempty\" yaml:\"CpuQuota,omitempty\""€*go.string."CPUPeriod" type.int64°Šgo.string."json:\"CpuPeriod,omitempty\" yaml:\"CpuPeriod,omitempty\""Ð&go.string."Ulimits"ð type.[]"".ULimit€‚go.string."json:\"Ulimits,omitempty\" yaml:\"Ulimits,omitempty\""` $type."".HostConfig ,go.string."HostConfig"°"go.importpath."".Àð$type."".HostConfigþ­6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptype.*"".CreateContainerOptions  7tÑ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*docker.CreateContainerOptions"pPgo.weak.type.**"".CreateContainerOptions€"runtime.zerovaluetype.*"".CreateContainerOptions€"runtime.zerovalueÀ à runtime.algarray0bruntime.gcbits.0x48484848448884848444840000000000P8go.string."docker.SwarmNode"p$type.*"".SwarmNode€"runtime.zerovalueÀ"type."".SwarmNodeÀgo.string."ID"àtype.stringðngo.string."json:\"ID,omitempty\" yaml:\"ID,omitempty\""go.string."IP"°type.stringÀngo.string."json:\"IP,omitempty\" yaml:\"IP,omitempty\""à go.string."Addr"€type.stringvgo.string."json:\"Addr,omitempty\" yaml:\"Addr,omitempty\""° go.string."Name"Ðtype.stringàvgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\""€ go.string."CPUs" 
type.int64°vgo.string."json:\"CPUs,omitempty\" yaml:\"CPUs,omitempty\""Ð$go.string."Memory"ðtype.int64€~go.string."json:\"Memory,omitempty\" yaml:\"Memory,omitempty\"" $go.string."Labels"À,type.map[string]stringÐ~go.string."json:\"Labels,omitempty\" yaml:\"Labels,omitempty\""`ð"type."".SwarmNodeð*go.string."SwarmNode"€"go.importpath."".À"type."".SwarmNodeþ:go.string."*docker.SwarmNode"PD*docker.SwarmNode :go.string."*docker.SwarmNode"þ$type.*"".SwarmNode  `L6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.SwarmNode"p6go.weak.type.**"".SwarmNode€"runtime.zerovalue"type."".SwarmNodeþ>go.string."*docker.PortMapping"PH*docker.PortMapping >go.string."*docker.PortMapping"þ(type.*"".PortMapping  iƒÞ‘6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.PortMapping"p:go.weak.type.**"".PortMapping€"runtime.zerovalue&type."".PortMappingþgo.weak.type.*[8]"".PortMapping€"runtime.zerovalue&type."".PortMapping *type.[]"".PortMappingþfgo.typelink.[8]docker.PortMapping/[8]"".PortMapping,type.[8]"".PortMappingþbgo.string."*map.bucket[string]docker.PortMapping"pl%*map.bucket[string]docker.PortMapping bgo.string."*map.bucket[string]docker.PortMapping"þLtype.*map.bucket[string]"".PortMapping  ùè16   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."*map.bucket[string]docker.PortMapping"p^go.weak.type.**map.bucket[string]"".PortMapping€"runtime.zerovalueJtype.map.bucket[string]"".PortMappingþ`go.string."map.bucket[string]docker.PortMapping"pj$map.bucket[string]docker.PortMapping `go.string."map.bucket[string]docker.PortMapping"þJtype.map.bucket[string]"".PortMapping°°ÐêÆ~ˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000P`go.string."map.bucket[string]docker.PortMapping"p\go.weak.type.*map.bucket[string]"".PortMapping€"runtime.zerovalueÀJtype.map.bucket[string]"".PortMappingÀ 
go.string."keys"àtype.[8]string$go.string."values"°,type.[8]"".PortMappingà(go.string."overflow"€Ltype.*map.bucket[string]"".PortMappingþZgo.string."map.hdr[string]docker.PortMapping"pd!map.hdr[string]docker.PortMapping Zgo.string."map.hdr[string]docker.PortMapping"þDtype.map.hdr[string]"".PortMappingàà0ÏAgà  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PZgo.string."map.hdr[string]docker.PortMapping"pVgo.weak.type.*map.hdr[string]"".PortMapping€"runtime.zerovalueÀDtype.map.hdr[string]"".PortMappingÀ&go.string."buckets"àLtype.*map.bucket[string]"".PortMapping,go.string."oldbuckets"°Ltype.*map.bucket[string]"".PortMappingþRgo.string."map[string]docker.PortMapping"`\map[string]docker.PortMapping Rgo.string."map[string]docker.PortMapping"þtype..gcprog."".NetworkSettings–™©™YfYþDgo.string."docker.NetworkSettings"PNdocker.NetworkSettings Dgo.string."docker.NetworkSettings"þ*go.string."IPAddress"@4 IPAddress *go.string."IPAddress"þŠgo.string."json:\"IPAddress,omitempty\" yaml:\"IPAddress,omitempty\""Œ5json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" Šgo.string."json:\"IPAddress,omitempty\" yaml:\"IPAddress,omitempty\""þ.go.string."IPPrefixLen"@8 IPPrefixLen .go.string."IPPrefixLen"þ’go.string."json:\"IPPrefixLen,omitempty\" yaml:\"IPPrefixLen,omitempty\"" ”9json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" ’go.string."json:\"IPPrefixLen,omitempty\" yaml:\"IPPrefixLen,omitempty\""þ&go.string."Gateway"00Gateway &go.string."Gateway"þ‚go.string."json:\"Gateway,omitempty\" yaml:\"Gateway,omitempty\""„1json:"Gateway,omitempty" yaml:"Gateway,omitempty" ‚go.string."json:\"Gateway,omitempty\" yaml:\"Gateway,omitempty\""þ$go.string."Bridge"0.Bridge $go.string."Bridge"þ~go.string."json:\"Bridge,omitempty\" yaml:\"Bridge,omitempty\""€€/json:"Bridge,omitempty" yaml:"Bridge,omitempty" ~go.string."json:\"Bridge,omitempty\" yaml:\"Bridge,omitempty\""þ’go.string."json:\"PortMapping,omitempty\" yaml:\"PortMapping,omitempty\"" 
”9json:"PortMapping,omitempty" yaml:"PortMapping,omitempty" ’go.string."json:\"PortMapping,omitempty\" yaml:\"PortMapping,omitempty\""þ"go.string."Ports"0,Ports "go.string."Ports"þzgo.string."json:\"Ports,omitempty\" yaml:\"Ports,omitempty\""€|-json:"Ports,omitempty" yaml:"Ports,omitempty" zgo.string."json:\"Ports,omitempty\" yaml:\"Ports,omitempty\""þ*go.string."NetworkID"@4 NetworkID *go.string."NetworkID"þŠgo.string."json:\"NetworkID,omitempty\" yaml:\"NetworkID,omitempty\""Œ5json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" Šgo.string."json:\"NetworkID,omitempty\" yaml:\"NetworkID,omitempty\""þ,go.string."EndpointID"@6 +EndpointID ,go.string."EndpointID"þŽgo.string."json:\"EndpointID,omitempty\" yaml:\"EndpointID,omitempty\""7json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" Žgo.string."json:\"EndpointID,omitempty\" yaml:\"EndpointID,omitempty\""þ,go.string."SandboxKey"@6 +SandboxKey ,go.string."SandboxKey"þŽgo.string."json:\"SandboxKey,omitempty\" yaml:\"SandboxKey,omitempty\""7json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty" Žgo.string."json:\"SandboxKey,omitempty\" yaml:\"SandboxKey,omitempty\""þ:go.string."GlobalIPv6Address"PDGlobalIPv6Address :go.string."GlobalIPv6Address"þªgo.string."json:\"GlobalIPv6Address,omitempty\" yaml:\"GlobalIPv6Address,omitempty\""°¬Ejson:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" ªgo.string."json:\"GlobalIPv6Address,omitempty\" yaml:\"GlobalIPv6Address,omitempty\""þ>go.string."GlobalIPv6PrefixLen"PHGlobalIPv6PrefixLen >go.string."GlobalIPv6PrefixLen"þ²go.string."json:\"GlobalIPv6PrefixLen,omitempty\" yaml:\"GlobalIPv6PrefixLen,omitempty\""À´Ijson:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" ²go.string."json:\"GlobalIPv6PrefixLen,omitempty\" yaml:\"GlobalIPv6PrefixLen,omitempty\""þ.go.string."IPv6Gateway"@8 IPv6Gateway .go.string."IPv6Gateway"þ’go.string."json:\"IPv6Gateway,omitempty\" yaml:\"IPv6Gateway,omitempty\"" ”9json:"IPv6Gateway,omitempty" 
yaml:"IPv6Gateway,omitempty" ’go.string."json:\"IPv6Gateway,omitempty\" yaml:\"IPv6Gateway,omitempty\""þ@go.string."LinkLocalIPv6Address"PJLinkLocalIPv6Address @go.string."LinkLocalIPv6Address"þ¶go.string."json:\"LinkLocalIPv6Address,omitempty\" yaml:\"LinkLocalIPv6Address,omitempty\""À¸Kjson:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty" ¶go.string."json:\"LinkLocalIPv6Address,omitempty\" yaml:\"LinkLocalIPv6Address,omitempty\""þDgo.string."LinkLocalIPv6PrefixLen"PNLinkLocalIPv6PrefixLen Dgo.string."LinkLocalIPv6PrefixLen"þ¾go.string."json:\"LinkLocalIPv6PrefixLen,omitempty\" yaml:\"LinkLocalIPv6PrefixLen,omitempty\""ÀÀOjson:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty" ¾go.string."json:\"LinkLocalIPv6PrefixLen,omitempty\" yaml:\"LinkLocalIPv6PrefixLen,omitempty\""þ@go.string."SecondaryIPAddresses"PJSecondaryIPAddresses @go.string."SecondaryIPAddresses"þ¶go.string."json:\"SecondaryIPAddresses,omitempty\" yaml:\"SecondaryIPAddresses,omitempty\""À¸Kjson:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty" ¶go.string."json:\"SecondaryIPAddresses,omitempty\" yaml:\"SecondaryIPAddresses,omitempty\""þDgo.string."SecondaryIPv6Addresses"PNSecondaryIPv6Addresses Dgo.string."SecondaryIPv6Addresses"þ¾go.string."json:\"SecondaryIPv6Addresses,omitempty\" yaml:\"SecondaryIPv6Addresses,omitempty\""ÀÀOjson:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty" ¾go.string."json:\"SecondaryIPv6Addresses,omitempty\" yaml:\"SecondaryIPv6Addresses,omitempty\""þ6go.string."NetworkSettings"@@NetworkSettings 6go.string."NetworkSettings"þ.type."".NetworkSettingsà à ø‹ä¡ÀY(8HPXhxˆ˜ °ÀÈà| à runtime.algarray06type..gc."".NetworkSettings@>type..gcprog."".NetworkSettingsPDgo.string."docker.NetworkSettings"p0type.*"".NetworkSettings€"runtime.zerovalueÀ.type."".NetworkSettingsÀ*go.string."IPAddress"àtype.stringðŠgo.string."json:\"IPAddress,omitempty\" 
yaml:\"IPAddress,omitempty\"".go.string."IPPrefixLen"°type.intÀ’go.string."json:\"IPPrefixLen,omitempty\" yaml:\"IPPrefixLen,omitempty\""à,go.string."MacAddress"€type.stringŽgo.string."json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""°&go.string."Gateway"Ðtype.stringà‚go.string."json:\"Gateway,omitempty\" yaml:\"Gateway,omitempty\""€$go.string."Bridge" type.string°~go.string."json:\"Bridge,omitempty\" yaml:\"Bridge,omitempty\""Ð.go.string."PortMapping"ðgo.string."GlobalIPv6PrefixLen"Ðtype.intà²go.string."json:\"GlobalIPv6PrefixLen,omitempty\" yaml:\"GlobalIPv6PrefixLen,omitempty\""€ .go.string."IPv6Gateway"  type.string° ’go.string."json:\"IPv6Gateway,omitempty\" yaml:\"IPv6Gateway,omitempty\""Ð @go.string."LinkLocalIPv6Address"ð type.string€ +¶go.string."json:\"LinkLocalIPv6Address,omitempty\" yaml:\"LinkLocalIPv6Address,omitempty\""  +Dgo.string."LinkLocalIPv6PrefixLen"À +type.intÐ +¾go.string."json:\"LinkLocalIPv6PrefixLen,omitempty\" yaml:\"LinkLocalIPv6PrefixLen,omitempty\""ð +@go.string."SecondaryIPAddresses" type.[]string  ¶go.string."json:\"SecondaryIPAddresses,omitempty\" yaml:\"SecondaryIPAddresses,omitempty\""À Dgo.string."SecondaryIPv6Addresses"à type.[]stringð ¾go.string."json:\"SecondaryIPv6Addresses,omitempty\" yaml:\"SecondaryIPv6Addresses,omitempty\""` .type."".NetworkSettings 6go.string."NetworkSettings"  "go.importpath."".° à .type."".NetworkSettingsþFgo.string."*docker.NetworkSettings"PP*docker.NetworkSettings Fgo.string."*docker.NetworkSettings"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ2type..hashfunc."".APIPort*type..hash."".APIPortþ.type..eqfunc."".APIPort&type..eq."".APIPortþ(type..alg."".APIPort 2type..hashfunc."".APIPort.type..eqfunc."".APIPortþ6go.string."*docker.APIPort"@@*docker.APIPort 6go.string."*docker.APIPort"þ type.*"".APIPort  ‡— Q6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*docker.APIPort"p2go.weak.type.**"".APIPort€"runtime.zerovaluetype."".APIPortþbruntime.gcbits.0x44484800000000000000000000000000 DHHþ4go.string."docker.APIPort"@>docker.APIPort 4go.string."docker.APIPort"þ.go.string."PrivatePort"@8 PrivatePort .go.string."PrivatePort"þ’go.string."json:\"PrivatePort,omitempty\" yaml:\"PrivatePort,omitempty\"" ”9json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" ’go.string."json:\"PrivatePort,omitempty\" yaml:\"PrivatePort,omitempty\""þ,go.string."PublicPort"@6 +PublicPort ,go.string."PublicPort"þŽgo.string."json:\"PublicPort,omitempty\" yaml:\"PublicPort,omitempty\""7json:"PublicPort,omitempty" yaml:"PublicPort,omitempty" Žgo.string."json:\"PublicPort,omitempty\" yaml:\"PublicPort,omitempty\""þ&go.string."APIPort"00APIPort &go.string."APIPort"þtype."".APIPortÐÐ0ÂoWŽ , (type..alg."".APIPort0bruntime.gcbits.0x44484800000000000000000000000000P4go.string."docker.APIPort"p type.*"".APIPort€"runtime.zerovalueÀtype."".APIPortÀ.go.string."PrivatePort"àtype.int64ð’go.string."json:\"PrivatePort,omitempty\" yaml:\"PrivatePort,omitempty\"",go.string."PublicPort"°type.int64ÀŽgo.string."json:\"PublicPort,omitempty\" yaml:\"PublicPort,omitempty\""à go.string."Type"€type.stringvgo.string."json:\"Type,omitempty\" yaml:\"Type,omitempty\""°go.string."IP"Ðtype.stringàngo.string."json:\"IP,omitempty\" yaml:\"IP,omitempty\""`€type."".APIPort€&go.string."APIPort""go.importpath."". 
Ðtype."".APIPortþ8go.string."[]docker.APIPort"PB[]docker.APIPort 8go.string."[]docker.APIPort"þ"type.[]"".APIPort  «¹$   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P8go.string."[]docker.APIPort"p4go.weak.type.*[]"".APIPort€"runtime.zerovaluetype."".APIPortþRgo.typelink.[]docker.APIPort/[]"".APIPort"type.[]"".APIPortþtgo.string."func(*docker.NetworkSettings) []docker.APIPort"€~.func(*docker.NetworkSettings) []docker.APIPort tgo.string."func(*docker.NetworkSettings) []docker.APIPort"þVtype.func(*"".NetworkSettings) []"".APIPort  •ü¡3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(*docker.NetworkSettings) []docker.APIPort"phgo.weak.type.*func(*"".NetworkSettings) []"".APIPort€"runtime.zerovalue €Vtype.func(*"".NetworkSettings) []"".APIPortÐVtype.func(*"".NetworkSettings) []"".APIPort€0type.*"".NetworkSettings"type.[]"".APIPortþ4go.string."PortMappingAPI"@>PortMappingAPI 4go.string."PortMappingAPI"þFgo.string."func() []docker.APIPort"PPfunc() []docker.APIPort Fgo.string."func() []docker.APIPort"þ0type.func() []"".APIPortø:‚x3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."func() []docker.APIPort"pBgo.weak.type.*func() []"".APIPort€"runtime.zerovalue €0type.func() []"".APIPortЀ0type.func() []"".APIPort€"type.[]"".APIPortþ0type.*"".NetworkSettingsÐÐÌVB6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*docker.NetworkSettings"pBgo.weak.type.**"".NetworkSettings€"runtime.zerovalue.type."".NetworkSettings` 0type.*"".NetworkSettingsÀð0type.*"".NetworkSettingsð4go.string."PortMappingAPI"0type.func() []"".APIPort Vtype.func(*"".NetworkSettings) []"".APIPort°H"".(*NetworkSettings).PortMappingAPIÀH"".(*NetworkSettings).PortMappingAPIþ$go.string."[]bool"0.[]bool $go.string."[]bool"þtype.[]bool  ±åç   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P$go.string."[]bool"p(go.weak.type.*[]bool€"runtime.zerovaluetype.boolþ2go.typelink.[]bool/[]booltype.[]boolþ&go.string."[8]bool"00[8]bool &go.string."[8]bool"þtype.[8]boolÀÀs£5‘   runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P&go.string."[8]bool"p*go.weak.type.*[8]bool€"runtime.zerovaluetype.bool type.[]boolþ6go.typelink.[8]bool/[8]booltype.[8]boolþFgo.string."*map.bucket[string]bool"PP*map.bucket[string]bool Fgo.string."*map.bucket[string]bool"þ8type.*map.bucket[string]bool  ë[ÔE6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*map.bucket[string]bool"pJgo.weak.type.**map.bucket[string]bool€"runtime.zerovalue6type.map.bucket[string]boolþ,>type..gc.map.bucket[string]bool(þFtype..gcprog.map.bucket[string]bool™™™™%þDgo.string."map.bucket[string]bool"PNmap.bucket[string]bool Dgo.string."map.bucket[string]bool"þ6type.map.bucket[string]bool°°˜2aBÝYˆ à runtime.algarray0>type..gc.map.bucket[string]bool@Ftype..gcprog.map.bucket[string]boolPDgo.string."map.bucket[string]bool"pHgo.weak.type.*map.bucket[string]bool€"runtime.zerovalueÀ6type.map.bucket[string]boolÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]boolà(go.string."overflow"€8type.*map.bucket[string]boolþ>go.string."map.hdr[string]bool"PHmap.hdr[string]bool >go.string."map.hdr[string]bool"þ0type.map.hdr[string]boolàà03‡(  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000P>go.string."map.hdr[string]bool"pBgo.weak.type.*map.hdr[string]bool€"runtime.zerovalueÀ0type.map.hdr[string]boolÀ&go.string."buckets"à8type.*map.bucket[string]bool,go.string."oldbuckets"°8type.*map.bucket[string]boolþ6go.string."map[string]bool"@@map[string]bool 6go.string."map[string]bool"þ(type.map[string]boolÜÜñÓ5˜ € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."map[string]bool"p:go.weak.type.*map[string]bool€"runtime.zerovaluetype.string 
type.bool°6type.map.bucket[string]boolÀ0type.map.hdr[string]boolþVgo.typelink.map[string]bool/map[string]bool(type.map[string]boolþ,*type..gc."".Containerdþ2type..gcprog."".Container 1Všeeeijfffª•þ8go.string."docker.Container"PBdocker.Container 8go.string."docker.Container"þvgo.string."json:\"Path,omitempty\" yaml:\"Path,omitempty\""€x+json:"Path,omitempty" yaml:"Path,omitempty" vgo.string."json:\"Path,omitempty\" yaml:\"Path,omitempty\""þ go.string."Args"0*Args go.string."Args"þvgo.string."json:\"Args,omitempty\" yaml:\"Args,omitempty\""€x+json:"Args,omitempty" yaml:"Args,omitempty" vgo.string."json:\"Args,omitempty\" yaml:\"Args,omitempty\""þzgo.string."json:\"State,omitempty\" yaml:\"State,omitempty\""€|-json:"State,omitempty" yaml:"State,omitempty" zgo.string."json:\"State,omitempty\" yaml:\"State,omitempty\""þ go.string."Node"0*Node go.string."Node"þvgo.string."json:\"Node,omitempty\" yaml:\"Node,omitempty\""€x+json:"Node,omitempty" yaml:"Node,omitempty" vgo.string."json:\"Node,omitempty\" yaml:\"Node,omitempty\""þ¢go.string."json:\"NetworkSettings,omitempty\" yaml:\"NetworkSettings,omitempty\""°¤Ajson:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" ¢go.string."json:\"NetworkSettings,omitempty\" yaml:\"NetworkSettings,omitempty\""þ.go.string."SysInitPath"@8 SysInitPath .go.string."SysInitPath"þ’go.string."json:\"SysInitPath,omitempty\" yaml:\"SysInitPath,omitempty\"" ”9json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty" ’go.string."json:\"SysInitPath,omitempty\" yaml:\"SysInitPath,omitempty\""þ4go.string."ResolvConfPath"@>ResolvConfPath 4go.string."ResolvConfPath"þžgo.string."json:\"ResolvConfPath,omitempty\" yaml:\"ResolvConfPath,omitempty\""  ?json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty" žgo.string."json:\"ResolvConfPath,omitempty\" yaml:\"ResolvConfPath,omitempty\""þ0go.string."HostnamePath"@: HostnamePath 0go.string."HostnamePath"þ–go.string."json:\"HostnamePath,omitempty\" yaml:\"HostnamePath,omitempty\"" 
˜;json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty" –go.string."json:\"HostnamePath,omitempty\" yaml:\"HostnamePath,omitempty\""þ*go.string."HostsPath"@4 HostsPath *go.string."HostsPath"þŠgo.string."json:\"HostsPath,omitempty\" yaml:\"HostsPath,omitempty\""Œ5json:"HostsPath,omitempty" yaml:"HostsPath,omitempty" Šgo.string."json:\"HostsPath,omitempty\" yaml:\"HostsPath,omitempty\""þ&go.string."LogPath"00LogPath &go.string."LogPath"þ‚go.string."json:\"LogPath,omitempty\" yaml:\"LogPath,omitempty\""„1json:"LogPath,omitempty" yaml:"LogPath,omitempty" ‚go.string."json:\"LogPath,omitempty\" yaml:\"LogPath,omitempty\""þ$go.string."Driver"0.Driver $go.string."Driver"þ~go.string."json:\"Driver,omitempty\" yaml:\"Driver,omitempty\""€€/json:"Driver,omitempty" yaml:"Driver,omitempty" ~go.string."json:\"Driver,omitempty\" yaml:\"Driver,omitempty\""þ*go.string."VolumesRW"@4 VolumesRW *go.string."VolumesRW"þŠgo.string."json:\"VolumesRW,omitempty\" yaml:\"VolumesRW,omitempty\""Œ5json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty" Šgo.string."json:\"VolumesRW,omitempty\" yaml:\"VolumesRW,omitempty\""þŽgo.string."json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""7json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" Žgo.string."json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""þ&go.string."ExecIDs"00ExecIDs &go.string."ExecIDs"þ‚go.string."json:\"ExecIDs,omitempty\" yaml:\"ExecIDs,omitempty\""„1json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty" ‚go.string."json:\"ExecIDs,omitempty\" yaml:\"ExecIDs,omitempty\""þ0go.string."RestartCount"@: RestartCount 0go.string."RestartCount"þ–go.string."json:\"RestartCount,omitempty\" yaml:\"RestartCount,omitempty\"" ˜;json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" –go.string."json:\"RestartCount,omitempty\" yaml:\"RestartCount,omitempty\""þ6go.string."AppArmorProfile"@@AppArmorProfile 6go.string."AppArmorProfile"þ¢go.string."json:\"AppArmorProfile,omitempty\" 
yaml:\"AppArmorProfile,omitempty\""°¤Ajson:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" ¢go.string."json:\"AppArmorProfile,omitempty\" yaml:\"AppArmorProfile,omitempty\""þ"type."".ContainerððˆÀ5SdY(8PX°ÀÈÐàð 0@HPXpxš à runtime.algarray0*type..gc."".Container@2type..gcprog."".ContainerP8go.string."docker.Container"p$type.*"".Container€"runtime.zerovalueÀ"type."".ContainerÀgo.string."ID"àtype.stringðFgo.string."json:\"Id\" yaml:\"Id\""&go.string."Created"°type.time.TimeÀ‚go.string."json:\"Created,omitempty\" yaml:\"Created,omitempty\""à go.string."Path"€type.stringvgo.string."json:\"Path,omitempty\" yaml:\"Path,omitempty\""° go.string."Args"Ðtype.[]stringàvgo.string."json:\"Args,omitempty\" yaml:\"Args,omitempty\""€$go.string."Config" type.*"".Config°~go.string."json:\"Config,omitempty\" yaml:\"Config,omitempty\""Ð"go.string."State"ðtype."".State€zgo.string."json:\"State,omitempty\" yaml:\"State,omitempty\"" "go.string."Image"Àtype.stringÐzgo.string."json:\"Image,omitempty\" yaml:\"Image,omitempty\""ð go.string."Node"$type.*"".SwarmNode vgo.string."json:\"Node,omitempty\" yaml:\"Node,omitempty\""À6go.string."NetworkSettings"à0type.*"".NetworkSettingsð¢go.string."json:\"NetworkSettings,omitempty\" yaml:\"NetworkSettings,omitempty\"".go.string."SysInitPath"°type.stringÀ’go.string."json:\"SysInitPath,omitempty\" yaml:\"SysInitPath,omitempty\""à4go.string."ResolvConfPath"€type.stringžgo.string."json:\"ResolvConfPath,omitempty\" yaml:\"ResolvConfPath,omitempty\""°0go.string."HostnamePath"Ðtype.stringà–go.string."json:\"HostnamePath,omitempty\" yaml:\"HostnamePath,omitempty\""€ *go.string."HostsPath"  type.string° Šgo.string."json:\"HostsPath,omitempty\" yaml:\"HostsPath,omitempty\""Ð &go.string."LogPath"ð type.string€ +‚go.string."json:\"LogPath,omitempty\" yaml:\"LogPath,omitempty\""  + go.string."Name"À +type.stringÐ +vgo.string."json:\"Name,omitempty\" yaml:\"Name,omitempty\""ð +$go.string."Driver" type.string  ~go.string."json:\"Driver,omitempty\" 
yaml:\"Driver,omitempty\""À &go.string."Volumes"à ,type.map[string]stringð ‚go.string."json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\"" *go.string."VolumesRW"° (type.map[string]boolÀ Šgo.string."json:\"VolumesRW,omitempty\" yaml:\"VolumesRW,omitempty\""à ,go.string."HostConfig"€ &type.*"".HostConfig Žgo.string."json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""° &go.string."ExecIDs"Ð type.[]stringà ‚go.string."json:\"ExecIDs,omitempty\" yaml:\"ExecIDs,omitempty\""€0go.string."RestartCount" type.int°–go.string."json:\"RestartCount,omitempty\" yaml:\"RestartCount,omitempty\""Ð6go.string."AppArmorProfile"ðtype.string€¢go.string."json:\"AppArmorProfile,omitempty\" yaml:\"AppArmorProfile,omitempty\""` "type."".Container *go.string."Container"°"go.importpath."".Àð"type."".Containerþ:go.string."*docker.Container"PD*docker.Container :go.string."*docker.Container"þ$type.*"".Container  ¯ÅÕß6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.Container"p6go.weak.type.**"".Container€"runtime.zerovalue"type."".Containerþ´go.string."func(*docker.Client, docker.CreateContainerOptions) (*docker.Container, error)"À¾Nfunc(*docker.Client, docker.CreateContainerOptions) (*docker.Container, error) ´go.string."func(*docker.Client, docker.CreateContainerOptions) (*docker.Container, error)"þŽtype.func(*"".Client, "".CreateContainerOptions) (*"".Container, error)ÀÀk73 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P´go.string."func(*docker.Client, docker.CreateContainerOptions) (*docker.Container, error)"p go.weak.type.*func(*"".Client, "".CreateContainerOptions) (*"".Container, error)€"runtime.zerovalue €Žtype.func(*"".Client, "".CreateContainerOptions) (*"".Container, error)РŽtype.func(*"".Client, "".CreateContainerOptions) (*"".Container, error)€type.*"".Client à 
runtime.algarray0bruntime.gcbits.0x84444848000000000000000000000000PHgo.string."docker.CreateExecOptions"p4type.*"".CreateExecOptions€"runtime.zerovalueÀ2type."".CreateExecOptionsÀ.go.string."AttachStdin"àtype.boolð’go.string."json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""0go.string."AttachStdout"°type.boolÀ–go.string."json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""à0go.string."AttachStderr"€type.bool–go.string."json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""°go.string."Tty"Ðtype.boolàrgo.string."json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""€go.string."Cmd" type.[]string°rgo.string."json:\"Cmd,omitempty\" yaml:\"Cmd,omitempty\""Ð*go.string."Container"ðtype.string€Šgo.string."json:\"Container,omitempty\" yaml:\"Container,omitempty\""  go.string."User"Àtype.stringÐvgo.string."json:\"User,omitempty\" yaml:\"User,omitempty\""`ð2type."".CreateExecOptionsð:go.string."CreateExecOptions"€"go.importpath."".À2type."".CreateExecOptionsþ.go.string."docker.Exec"@8 docker.Exec .go.string."docker.Exec"þngo.string."json:\"Id,omitempty\" yaml:\"Id,omitempty\""pp'json:"Id,omitempty" yaml:"Id,omitempty" ngo.string."json:\"Id,omitempty\" yaml:\"Id,omitempty\""þ go.string."Exec"0*Exec go.string."Exec"þtype."".ExecààCÍ€© À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000P.go.string."docker.Exec"ptype.*"".Exec€"runtime.zerovalueÀtype."".ExecÀgo.string."ID"àtype.stringðngo.string."json:\"Id,omitempty\" yaml:\"Id,omitempty\""`type."".Exec go.string."Exec" "go.importpath."".°àtype."".Execþ0go.string."*docker.Exec"@: *docker.Exec 0go.string."*docker.Exec"þtype.*"".Exec  Ά‘ 6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."*docker.Exec"p,go.weak.type.**"".Exec€"runtime.zerovaluetype."".Execþ go.string."func(*docker.Client, docker.CreateExecOptions) (*docker.Exec, error)"°ªDfunc(*docker.Client, docker.CreateExecOptions) (*docker.Exec, error)  go.string."func(*docker.Client, 
docker.CreateExecOptions) (*docker.Exec, error)"þztype.func(*"".Client, "".CreateExecOptions) (*"".Exec, error)ÀÀT!á3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P go.string."func(*docker.Client, docker.CreateExecOptions) (*docker.Exec, error)"pŒgo.weak.type.*func(*"".Client, "".CreateExecOptions) (*"".Exec, error)€"runtime.zerovalue €ztype.func(*"".Client, "".CreateExecOptions) (*"".Exec, error)Рztype.func(*"".Client, "".CreateExecOptions) (*"".Exec, error)€type.*"".Client2type."".CreateExecOptions type.*"".Exec°type.errorþbruntime.gcbits.0xcc000000000000000000000000000000 Ìþ0go.string."interface {}"@: interface {} 0go.string."interface {}"þ"type.interface {}ÀÀçW  € runtime.algarray0bruntime.gcbits.0xcc000000000000000000000000000000P0go.string."interface {}"p4go.weak.type.*interface {}€"runtime.zerovalueÀ"type.interface {}þ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þjson:"options" 8go.string."json:\"options\""þ@go.string."CreateNetworkOptions"PJCreateNetworkOptions @go.string."CreateNetworkOptions"þ8type."".CreateNetworkOptions€€(bH>? 
& à runtime.algarray0bruntime.gcbits.0x48488884840000000000000000000000PNgo.string."docker.CreateNetworkOptions"p:type.*"".CreateNetworkOptions€"runtime.zerovalueÀ8type."".CreateNetworkOptionsÀ go.string."Name"àtype.stringð2go.string."json:\"name\"".go.string."NetworkType"°type.stringÀBgo.string."json:\"network_type\""à&go.string."Options"€8type.map[string]interface {}8go.string."json:\"options\""`°8type."".CreateNetworkOptions°@go.string."CreateNetworkOptions"À"go.importpath."".Ѐ8type."".CreateNetworkOptionsþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ4type..hashfunc."".Endpoint,type..hash."".Endpointþ0type..eqfunc."".Endpoint(type..eq."".Endpointþ*type..alg."".Endpoint 4type..hashfunc."".Endpoint0type..eqfunc."".Endpointþ6go.string."docker.Endpoint"@@docker.Endpoint 6go.string."docker.Endpoint"þ.go.string."json:\"id\""@4 json:"id" .go.string."json:\"id\""þ&go.string."Network"00Network &go.string."Network"þ8go.string."json:\"network\""@>json:"network" 8go.string."json:\"network\""þ(go.string."Endpoint"@2Endpoint (go.string."Endpoint"þ type."".Endpoint€€0Öà & *type..alg."".Endpoint0bruntime.gcbits.0x48484800000000000000000000000000P6go.string."docker.Endpoint"p"type.*"".Endpoint€"runtime.zerovalueÀ type."".EndpointÀ go.string."Name"àtype.stringð2go.string."json:\"name\""go.string."ID"°type.stringÀ.go.string."json:\"id\""à&go.string."Network"€type.string8go.string."json:\"network\""`° type."".Endpoint°(go.string."Endpoint"À"go.importpath."".Ѐ type."".Endpointþ8go.string."*docker.Endpoint"PB*docker.Endpoint 8go.string."*docker.Endpoint"þ"type.*"".Endpoint  Wðìt6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*docker.Endpoint"p4go.weak.type.**"".Endpoint€"runtime.zerovalue type."".Endpointþdocker.Network 4go.string."docker.Network"þ2go.string."json:\"type\""@8 json:"type" 
2go.string."json:\"type\""þ*go.string."Endpoints"@4 Endpoints *go.string."Endpoints"þtype.*"".ExportContainerOptions  –ü/‚6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*docker.ExportContainerOptions"pPgo.weak.type.**"".ExportContainerOptions€"runtime.zerovaluetype.*"".ExportContainerOptions€"runtime.zerovalueÀtype..alg."".ExportImageOptions Htype..hashfunc."".ExportImageOptionsDtype..eqfunc."".ExportImageOptionsþLgo.string."*docker.ExportImageOptions"`V*docker.ExportImageOptions Lgo.string."*docker.ExportImageOptions"þ6type.*"".ExportImageOptions  Þ¼´6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."*docker.ExportImageOptions"pHgo.weak.type.**"".ExportImageOptions€"runtime.zerovalue4type."".ExportImageOptionsþJgo.string."docker.ExportImageOptions"`Tdocker.ExportImageOptions Jgo.string."docker.ExportImageOptions"þtype..alg."".ExportImageOptions0bruntime.gcbits.0x488c0000000000000000000000000000PJgo.string."docker.ExportImageOptions"p6type.*"".ExportImageOptions€"runtime.zerovalueÀ4type."".ExportImageOptionsÀ go.string."Name"àtype.string0go.string."OutputStream"°type.io.Writer`à4type."".ExportImageOptionsàgo.string."ExportImagesOptions"PHExportImagesOptions >go.string."ExportImagesOptions"þ6type."".ExportImagesOptions°°(œ2Wô à runtime.algarray0bruntime.gcbits.0x48c488448c0000000000000000000000PLgo.string."docker.ExportImagesOptions"p8type.*"".ExportImagesOptions€"runtime.zerovalueÀ6type."".ExportImagesOptionsÀ"go.string."Names"àtype.[]string0go.string."OutputStream"°type.io.WriterÀ(go.string."qs:\"-\""`à6type."".ExportImagesOptionsà>go.string."ExportImagesOptions"ð"go.importpath."".€°6type."".ExportImagesOptionsþ„go.string."func(*docker.Client, docker.ExportImagesOptions) error"Ž6func(*docker.Client, docker.ExportImagesOptions) error „go.string."func(*docker.Client, docker.ExportImagesOptions) error"þftype.func(*"".Client, "".ExportImagesOptions) error°°¤ù'Ì3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P„go.string."func(*docker.Client, docker.ExportImagesOptions) error"pxgo.weak.type.*func(*"".Client, "".ExportImagesOptions) error€"runtime.zerovalue €ftype.func(*"".Client, "".ExportImagesOptions) errorРftype.func(*"".Client, "".ExportImagesOptions) error€type.*"".Client6type."".ExportImagesOptions type.errorþ@go.string."*docker.ImageHistory"PJ*docker.ImageHistory @go.string."*docker.ImageHistory"þ*type.*"".ImageHistory  RYÈ?6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."*docker.ImageHistory"pgo.string."docker.ImageHistory"PHdocker.ImageHistory >go.string."docker.ImageHistory"þ go.string."Tags"0*Tags go.string."Tags"þvgo.string."json:\"Tags,omitempty\" yaml:\"Tags,omitempty\""€x+json:"Tags,omitempty" yaml:"Tags,omitempty" vgo.string."json:\"Tags,omitempty\" yaml:\"Tags,omitempty\""þ*go.string."CreatedBy"@4 CreatedBy *go.string."CreatedBy"þŠgo.string."json:\"CreatedBy,omitempty\" yaml:\"CreatedBy,omitempty\""Œ5json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" Šgo.string."json:\"CreatedBy,omitempty\" yaml:\"CreatedBy,omitempty\""þ0go.string."ImageHistory"@: ImageHistory 0go.string."ImageHistory"þ(type."".ImageHistory  H[Ę(0@2 à runtime.algarray0bruntime.gcbits.0x48484448848444844400000000000000P>go.string."docker.ImageHistory"p*type.*"".ImageHistory€"runtime.zerovalueÀ(type."".ImageHistoryÀgo.string."ID"àtype.stringðFgo.string."json:\"Id\" yaml:\"Id\"" go.string."Tags"°type.[]stringÀvgo.string."json:\"Tags,omitempty\" yaml:\"Tags,omitempty\""à&go.string."Created"€type.int64‚go.string."json:\"Created,omitempty\" yaml:\"Created,omitempty\""°*go.string."CreatedBy"Ðtype.stringàŠgo.string."json:\"CreatedBy,omitempty\" yaml:\"CreatedBy,omitempty\""€ go.string."Size" type.int64°vgo.string."json:\"Size,omitempty\" yaml:\"Size,omitempty\""`Ð(type."".ImageHistoryÐ0go.string."ImageHistory"à"go.importpath."".ð 
(type."".ImageHistoryþBgo.string."[]docker.ImageHistory"PL[]docker.ImageHistory Bgo.string."[]docker.ImageHistory"þ,type.[]"".ImageHistory  }ôc\   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PBgo.string."[]docker.ImageHistory"p>go.weak.type.*[]"".ImageHistory€"runtime.zerovalue(type."".ImageHistoryþfgo.typelink.[]docker.ImageHistory/[]"".ImageHistory,type.[]"".ImageHistoryþŽgo.string."func(*docker.Client, string) ([]docker.ImageHistory, error)" ˜;func(*docker.Client, string) ([]docker.ImageHistory, error) Žgo.string."func(*docker.Client, string) ([]docker.ImageHistory, error)"þptype.func(*"".Client, string) ([]"".ImageHistory, error)ÀÀ+ÉŒM3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*docker.Client, string) ([]docker.ImageHistory, error)"p‚go.weak.type.*func(*"".Client, string) ([]"".ImageHistory, error)€"runtime.zerovalue €ptype.func(*"".Client, string) ([]"".ImageHistory, error)Рptype.func(*"".Client, string) ([]"".ImageHistory, error)€type.*"".Clienttype.string ,type.[]"".ImageHistory°type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·af3107c17ee1ab6f9f33230b5c7e3062þTgclocals·9c703c5c7b9c1932c840b69f8ebce236þHtype..hashfunc."".ImportImageOptions@type..hash."".ImportImageOptionsþDtype..eqfunc."".ImportImageOptionstype..alg."".ImportImageOptions Htype..hashfunc."".ImportImageOptionsDtype..eqfunc."".ImportImageOptionsþLgo.string."*docker.ImportImageOptions"`V*docker.ImportImageOptions Lgo.string."*docker.ImportImageOptions"þ6type.*"".ImportImageOptions  5Œ¸6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."*docker.ImportImageOptions"pHgo.weak.type.**"".ImportImageOptions€"runtime.zerovalue4type."".ImportImageOptionsþbruntime.gcbits.0x4848488c8c848484c4c8480000000000 HHHŒŒ„„„ÄÈHþJgo.string."docker.ImportImageOptions"`Tdocker.ImportImageOptions Jgo.string."docker.ImportImageOptions"þ$go.string."Source"0.Source 
$go.string."Source"þ4go.string."qs:\"fromSrc\""@: qs:"fromSrc" 4go.string."qs:\"fromSrc\""þ,go.string."qs:\"tag\""@2qs:"tag" ,go.string."qs:\"tag\""þtype..alg."".ImportImageOptions0bruntime.gcbits.0x4848488c8c848484c4c8480000000000PJgo.string."docker.ImportImageOptions"p6type.*"".ImportImageOptions€"runtime.zerovalueÀ4type."".ImportImageOptionsÀ,go.string."Repository"àtype.stringð.go.string."qs:\"repo\""$go.string."Source"°type.stringÀ4go.string."qs:\"fromSrc\""àgo.string."Tag"€type.string,go.string."qs:\"tag\""°.go.string."InputStream"Ðtype.io.Readerà(go.string."qs:\"-\""€0go.string."OutputStream" type.io.Writer°(go.string."qs:\"-\""Ð2go.string."RawJSONStream"ðtype.bool€(go.string."qs:\"-\""` 4type."".ImportImageOptions type.func(*"".Env, string) bool°°™ñJó3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."func(*docker.Env, string) bool"pPgo.weak.type.*func(*"".Env, string) bool€"runtime.zerovalue €>type.func(*"".Env, string) boolР>type.func(*"".Env, string) bool€type.*"".Envtype.string type.boolþXgo.string."func(*docker.Env, string) string"pb func(*docker.Env, string) string Xgo.string."func(*docker.Env, string) string"þBtype.func(*"".Env, string) string°°@iÜ(3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func(*docker.Env, string) string"pTgo.weak.type.*func(*"".Env, string) string€"runtime.zerovalue €Btype.func(*"".Env, string) stringРBtype.func(*"".Env, string) string€type.*"".Envtype.string type.stringþRgo.string."func(*docker.Env, string) int"`\func(*docker.Env, string) int Rgo.string."func(*docker.Env, string) int"þtype.func(*"".Env, string, int)°°œróÃ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."func(*docker.Env, string, int)"pPgo.weak.type.*func(*"".Env, string, int)€"runtime.zerovalue €>type.func(*"".Env, string, int)а>type.func(*"".Env, string, int)€type.*"".Envtype.string type.intþXgo.string."func(*docker.Env, string, int64)"pb func(*docker.Env, 
string, int64) Xgo.string."func(*docker.Env, string, int64)"þBtype.func(*"".Env, string, int64)°°g’î3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func(*docker.Env, string, int64)"pTgo.weak.type.*func(*"".Env, string, int64)€"runtime.zerovalue €Btype.func(*"".Env, string, int64)аBtype.func(*"".Env, string, int64)€type.*"".Envtype.string type.int64þjgo.string."func(*docker.Env, string, []string) error"€t)func(*docker.Env, string, []string) error jgo.string."func(*docker.Env, string, []string) error"þTtype.func(*"".Env, string, []string) errorÀÀ©ÉD¶3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*docker.Env, string, []string) error"pfgo.weak.type.*func(*"".Env, string, []string) error€"runtime.zerovalue €Ttype.func(*"".Env, string, []string) errorаTtype.func(*"".Env, string, []string) error€type.*"".Envtype.string type.[]string°type.errorþ$go.string."Decode"0.Decode $go.string."Decode"þBgo.string."func(io.Reader) error"PLfunc(io.Reader) error Bgo.string."func(io.Reader) error"þ4type.func(io.Reader) error  û&ë3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func(io.Reader) error"pFgo.weak.type.*func(io.Reader) error€"runtime.zerovalue €4type.func(io.Reader) errorÐ4type.func(io.Reader) error€type.io.Readertype.errorþ$go.string."Exists"0.Exists $go.string."Exists"þ:go.string."func(string) bool"PDfunc(string) bool :go.string."func(string) bool"þ,type.func(string) bool  *÷€3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."func(string) bool"p>go.weak.type.*func(string) bool€"runtime.zerovalue €,type.func(string) boolÐ,type.func(string) bool€type.stringtype.boolþgo.string."Get"0(Get go.string."Get"þ>go.string."func(string) string"PHfunc(string) string >go.string."func(string) string"þ0type.func(string) string  Mü¨ç3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func(string) 
string"pBgo.weak.type.*func(string) string€"runtime.zerovalue €0type.func(string) stringÐ0type.func(string) string€type.stringtype.stringþ&go.string."GetBool"00GetBool &go.string."GetBool"þ$go.string."GetInt"0.GetInt $go.string."GetInt"þ8go.string."func(string) int"PBfunc(string) int 8go.string."func(string) int"þ*type.func(string) int  bU3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."func(string) int"ptype.func(string, interface {})  ¹¾ª3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(string, interface {})"pPgo.weak.type.*func(string, interface {})€"runtime.zerovalue €>type.func(string, interface {})Р>type.func(string, interface {})€type.string"type.interface {}þ&go.string."SetBool"00SetBool &go.string."SetBool"þgo.weak.type.*func(string, int)€"runtime.zerovalue €,type.func(string, int)Р,type.func(string, int)€type.stringtype.intþ(go.string."SetInt64"@2SetInt64 (go.string."SetInt64"þ>go.string."func(string, int64)"PHfunc(string, int64) >go.string."func(string, int64)"þ0type.func(string, int64)  ì?3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func(string, int64)"pBgo.weak.type.*func(string, int64)€"runtime.zerovalue €0type.func(string, int64)Р0type.func(string, int64)€type.stringtype.int64þ&go.string."SetJSON"00SetJSON &go.string."SetJSON"þ&go.string."SetList"00SetList &go.string."SetList"þPgo.string."func(string, []string) error"`Zfunc(string, []string) error Pgo.string."func(string, []string) error"þBtype.func(string, []string) error°°®1òº3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(string, []string) error"pTgo.weak.type.*func(string, []string) error€"runtime.zerovalue €Btype.func(string, []string) errorРBtype.func(string, []string) error€type.stringtype.[]string type.errorþtype.*"".Envð ð Ó96°   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."*docker.Env"p*go.weak.type.**"".Env€"runtime.zerovaluetype."".Env` type.*"".EnvÀðtype.*"".Envð$go.string."Decode"4type.func(io.Reader) error Ftype.func(*"".Env, io.Reader) error° "".(*Env).DecodeÀ "".(*Env).DecodeÐ$go.string."Exists"ð,type.func(string) bool€>type.func(*"".Env, string) bool "".(*Env).Exists  "".(*Env).Exists°go.string."Get"Ð0type.func(string) stringàBtype.func(*"".Env, string) stringð"".(*Env).Get€"".(*Env).Get&go.string."GetBool"°,type.func(string) boolÀ>type.func(*"".Env, string) boolÐ""".(*Env).GetBoolà""".(*Env).GetBoolð$go.string."GetInt"*type.func(string) int type.func(string, interface {})à Ptype.func(*"".Env, string, interface {})ð """.(*Env).SetAuto€ +""".(*Env).SetAuto +&go.string."SetBool"° +.type.func(string, bool)À +@type.func(*"".Env, string, bool)Ð +""".(*Env).SetBoolà +""".(*Env).SetBoolð +$go.string."SetInt" ,type.func(string, int)  >type.func(*"".Env, string, int)°  "".(*Env).SetIntÀ  "".(*Env).SetIntÐ (go.string."SetInt64"ð 0type.func(string, int64)€ Btype.func(*"".Env, string, int64) $"".(*Env).SetInt64  $"".(*Env).SetInt64° &go.string."SetJSON"Ð Jtype.func(string, interface {}) errorà \type.func(*"".Env, string, interface {}) errorð """.(*Env).SetJSON€ """.(*Env).SetJSON &go.string."SetList"° Btype.func(string, []string) errorÀ Ttype.func(*"".Env, string, []string) errorÐ """.(*Env).SetListà """.(*Env).SetListþjgo.string."func(*docker.Client) (*docker.Env, error)"€t)func(*docker.Client) (*docker.Env, error) jgo.string."func(*docker.Client) (*docker.Env, error)"þLtype.func(*"".Client) (*"".Env, error)°°0ºl°3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*docker.Client) (*docker.Env, error)"p^go.weak.type.*func(*"".Client) (*"".Env, error)€"runtime.zerovalue €Ltype.func(*"".Client) (*"".Env, error)ÐLtype.func(*"".Client) (*"".Env, error)€type.*"".Clienttype.*"".Env type.errorþ†go.string."func(*docker.Client, 
string) (*docker.Container, error)"7func(*docker.Client, string) (*docker.Container, error) †go.string."func(*docker.Client, string) (*docker.Container, error)"þhtype.func(*"".Client, string) (*"".Container, error)ÀÀTB3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P†go.string."func(*docker.Client, string) (*docker.Container, error)"pzgo.weak.type.*func(*"".Client, string) (*"".Container, error)€"runtime.zerovalue €htype.func(*"".Client, string) (*"".Container, error)Рhtype.func(*"".Client, string) (*"".Container, error)€type.*"".Clienttype.string $type.*"".Container°type.errorþJgo.string."*docker.ExecProcessConfig"`T*docker.ExecProcessConfig Jgo.string."*docker.ExecProcessConfig"þ4type.*"".ExecProcessConfig  Y916   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."*docker.ExecProcessConfig"pFgo.weak.type.**"".ExecProcessConfig€"runtime.zerovalue2type."".ExecProcessConfigþbruntime.gcbits.0x84444848444884844400000000000000 „DHHDH„„DþHgo.string."docker.ExecProcessConfig"`Rdocker.ExecProcessConfig Hgo.string."docker.ExecProcessConfig"þŽgo.string."json:\"privileged,omitempty\" yaml:\"privileged,omitempty\""7json:"privileged,omitempty" yaml:"privileged,omitempty" Žgo.string."json:\"privileged,omitempty\" yaml:\"privileged,omitempty\""þvgo.string."json:\"user,omitempty\" yaml:\"user,omitempty\""€x+json:"user,omitempty" yaml:"user,omitempty" vgo.string."json:\"user,omitempty\" yaml:\"user,omitempty\""þrgo.string."json:\"tty,omitempty\" yaml:\"tty,omitempty\""€t)json:"tty,omitempty" yaml:"tty,omitempty" rgo.string."json:\"tty,omitempty\" yaml:\"tty,omitempty\""þ,go.string."EntryPoint"@6 +EntryPoint ,go.string."EntryPoint"þŽgo.string."json:\"entrypoint,omitempty\" yaml:\"entrypoint,omitempty\""7json:"entrypoint,omitempty" yaml:"entrypoint,omitempty" Žgo.string."json:\"entrypoint,omitempty\" yaml:\"entrypoint,omitempty\""þ*go.string."Arguments"@4 Arguments *go.string."Arguments"þŠgo.string."json:\"arguments,omitempty\" 
yaml:\"arguments,omitempty\""Œ5json:"arguments,omitempty" yaml:"arguments,omitempty" Šgo.string."json:\"arguments,omitempty\" yaml:\"arguments,omitempty\""þ:go.string."ExecProcessConfig"PDExecProcessConfig :go.string."ExecProcessConfig"þ2type."".ExecProcessConfig  Hx6® 02 à runtime.algarray0bruntime.gcbits.0x84444848444884844400000000000000PHgo.string."docker.ExecProcessConfig"p4type.*"".ExecProcessConfig€"runtime.zerovalueÀ2type."".ExecProcessConfigÀ,go.string."Privileged"àtype.boolðŽgo.string."json:\"privileged,omitempty\" yaml:\"privileged,omitempty\"" go.string."User"°type.stringÀvgo.string."json:\"user,omitempty\" yaml:\"user,omitempty\""àgo.string."Tty"€type.boolrgo.string."json:\"tty,omitempty\" yaml:\"tty,omitempty\""°,go.string."EntryPoint"Ðtype.stringàŽgo.string."json:\"entrypoint,omitempty\" yaml:\"entrypoint,omitempty\""€*go.string."Arguments" type.[]string°Šgo.string."json:\"arguments,omitempty\" yaml:\"arguments,omitempty\""`Ð2type."".ExecProcessConfigÐ:go.string."ExecProcessConfig"à"go.importpath."".ð 2type."".ExecProcessConfigþ,.type..gc."".ExecInspect€þ6type..gcprog."".ExecInspect&&?Ve™e¥YVV–¦fff¦Zþgo.string."*docker.ExecInspect"PH*docker.ExecInspect >go.string."*docker.ExecInspect"þ(type.*"".ExecInspect  Wœ~6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.ExecInspect"p:go.weak.type.**"".ExecInspect€"runtime.zerovalue&type."".ExecInspectþŠgo.string."func(*docker.Client, string) (*docker.ExecInspect, error)" ”9func(*docker.Client, string) (*docker.ExecInspect, error) Šgo.string."func(*docker.Client, string) (*docker.ExecInspect, error)"þltype.func(*"".Client, string) (*"".ExecInspect, error)ÀÀÎ>mŸ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŠgo.string."func(*docker.Client, string) (*docker.ExecInspect, error)"p~go.weak.type.*func(*"".Client, string) (*"".ExecInspect, error)€"runtime.zerovalue €ltype.func(*"".Client, string) (*"".ExecInspect, error)Рltype.func(*"".Client, string) 
(*"".ExecInspect, error)€type.*"".Clienttype.string (type.*"".ExecInspect°type.errorþ~go.string."func(*docker.Client, string) (*docker.Image, error)"ˆ3func(*docker.Client, string) (*docker.Image, error) ~go.string."func(*docker.Client, string) (*docker.Image, error)"þ`type.func(*"".Client, string) (*"".Image, error)ÀÀa7:3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P~go.string."func(*docker.Client, string) (*docker.Image, error)"prgo.weak.type.*func(*"".Client, string) (*"".Image, error)€"runtime.zerovalue €`type.func(*"".Client, string) (*"".Image, error)Р`type.func(*"".Client, string) (*"".Image, error)€type.*"".Clienttype.string type.*"".Image°type.errorþ4go.string."*docker.Signal"@>*docker.Signal 4go.string."*docker.Signal"þtype.*"".Signal  Ìp°^6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*docker.Signal"p0go.weak.type.**"".Signal€"runtime.zerovaluetype."".Signalþ2go.string."docker.Signal"@< docker.Signal 2go.string."docker.Signal"þ$go.string."Signal"0.Signal $go.string."Signal"þtype."".Signalàà Ðè­‚   runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P2go.string."docker.Signal"ptype.*"".Signal€"runtime.zerovalue`type."".Signal$go.string."Signal" "go.importpath."".°àtype."".SignalþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þLtype..hashfunc."".KillContainerOptionsDtype..hash."".KillContainerOptionsþHtype..eqfunc."".KillContainerOptions@type..eq."".KillContainerOptionsþBtype..alg."".KillContainerOptions Ltype..hashfunc."".KillContainerOptionsHtype..eqfunc."".KillContainerOptionsþPgo.string."*docker.KillContainerOptions"`Z*docker.KillContainerOptions Pgo.string."*docker.KillContainerOptions"þ:type.*"".KillContainerOptions  ´Ó6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."*docker.KillContainerOptions"pLgo.weak.type.**"".KillContainerOptions€"runtime.zerovalue8type."".KillContainerOptionsþNgo.string."docker.KillContainerOptions"`Xdocker.KillContainerOptions Ngo.string."docker.KillContainerOptions"þ@go.string."KillContainerOptions"PJKillContainerOptions @go.string."KillContainerOptions"þ8type."".KillContainerOptions°°ÄÖÔ6 Btype..alg."".KillContainerOptions0bruntime.gcbits.0x48844400000000000000000000000000PNgo.string."docker.KillContainerOptions"p:type.*"".KillContainerOptions€"runtime.zerovalueÀ8type."".KillContainerOptionsÀgo.string."ID"àtype.stringð(go.string."qs:\"-\""$go.string."Signal"°type."".Signal`à8type."".KillContainerOptionsà@go.string."KillContainerOptions"ð"go.importpath."".€°8type."".KillContainerOptionsþ†go.string."func(*docker.Client, docker.KillContainerOptions) error"7func(*docker.Client, docker.KillContainerOptions) error †go.string."func(*docker.Client, docker.KillContainerOptions) error"þhtype.func(*"".Client, "".KillContainerOptions) error°°Qù¢%3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P†go.string."func(*docker.Client, docker.KillContainerOptions) error"pzgo.weak.type.*func(*"".Client, "".KillContainerOptions) error€"runtime.zerovalue €htype.func(*"".Client, "".KillContainerOptions) errorРhtype.func(*"".Client, "".KillContainerOptions) error€type.*"".Client8type."".KillContainerOptions type.errorþ,go.string."[][]string"@6 +[][]string ,go.string."[][]string"þtype.[][]string  ¼:è   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P,go.string."[][]string"p0go.weak.type.*[][]string€"runtime.zerovaluetype.[]stringþBgo.typelink.[][]string/[][]stringtype.[][]stringþ.go.string."[8][]string"@8 [8][]string .go.string."[8][]string"þ type.[8][]stringÀÀÀ½e³r à 
runtime.algarray0bruntime.gcbits.0x48844448844448844448844400000000P.go.string."[8][]string"p2go.weak.type.*[8][]string€"runtime.zerovaluetype.[]string type.[][]stringþFgo.typelink.[8][]string/[8][]string type.[8][]stringþNgo.string."*map.bucket[string][]string"`X*map.bucket[string][]string Ngo.string."*map.bucket[string][]string"þ@type.*map.bucket[string][]string  ÄY¬R6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."*map.bucket[string][]string"pRgo.weak.type.**map.bucket[string][]string€"runtime.zerovalue>type.map.bucket[string][]stringþ,Ftype..gc.map.bucket[string][]string,þNtype..gcprog.map.bucket[string][]string*™™™™Y–eY–e þLgo.string."map.bucket[string][]string"`Vmap.bucket[string][]string Lgo.string."map.bucket[string][]string"þ>type.map.bucket[string][]string°°PúTJ¹YˆH à runtime.algarray0Ftype..gc.map.bucket[string][]string@Ntype..gcprog.map.bucket[string][]stringPLgo.string."map.bucket[string][]string"pPgo.weak.type.*map.bucket[string][]string€"runtime.zerovalueÀ>type.map.bucket[string][]stringÀ go.string."keys"àtype.[8]string$go.string."values"° type.[8][]stringà(go.string."overflow"€@type.*map.bucket[string][]stringþFgo.string."map.hdr[string][]string"PPmap.hdr[string][]string Fgo.string."map.hdr[string][]string"þ8type.map.hdr[string][]stringàà0–‹˜  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PFgo.string."map.hdr[string][]string"pJgo.weak.type.*map.hdr[string][]string€"runtime.zerovalueÀ8type.map.hdr[string][]stringÀ&go.string."buckets"à@type.*map.bucket[string][]string,go.string."oldbuckets"°@type.*map.bucket[string][]stringþ>go.string."map[string][]string"PHmap[string][]string >go.string."map[string][]string"þ0type.map[string][]stringÜÜ'>@5P € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."map[string][]string"pBgo.weak.type.*map[string][]string€"runtime.zerovaluetype.string 
type.[]string°>type.map.bucket[string][]stringÀ8type.map.hdr[string][]stringþfgo.typelink.map[string][]string/map[string][]string0type.map[string][]stringþRgo.string."*docker.ListContainersOptions"`\*docker.ListContainersOptions Rgo.string."*docker.ListContainersOptions"þgo.weak.type.**"".APIContainers€"runtime.zerovalue*type."".APIContainersþ,2type..gc."".APIContainers$þ:type..gcprog."".APIContainersf–Yeþ@go.string."docker.APIContainers"PJdocker.APIContainers @go.string."docker.APIContainers"þ&go.string."Command"00Command &go.string."Command"þ‚go.string."json:\"Command,omitempty\" yaml:\"Command,omitempty\""„1json:"Command,omitempty" yaml:"Command,omitempty" ‚go.string."json:\"Command,omitempty\" yaml:\"Command,omitempty\""þ$go.string."SizeRw"0.SizeRw $go.string."SizeRw"þ~go.string."json:\"SizeRw,omitempty\" yaml:\"SizeRw,omitempty\""€€/json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" ~go.string."json:\"SizeRw,omitempty\" yaml:\"SizeRw,omitempty\""þ,go.string."SizeRootFs"@6 +SizeRootFs ,go.string."SizeRootFs"þŽgo.string."json:\"SizeRootFs,omitempty\" yaml:\"SizeRootFs,omitempty\""7json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" Žgo.string."json:\"SizeRootFs,omitempty\" yaml:\"SizeRootFs,omitempty\""þzgo.string."json:\"Names,omitempty\" yaml:\"Names,omitempty\""€|-json:"Names,omitempty" yaml:"Names,omitempty" zgo.string."json:\"Names,omitempty\" yaml:\"Names,omitempty\""þ2go.string."APIContainers"@< APIContainers 2go.string."APIContainers"þ*type."".APIContainersààˆ¿¨û Y  08H`hpL à runtime.algarray02type..gc."".APIContainers@:type..gcprog."".APIContainersP@go.string."docker.APIContainers"p,type.*"".APIContainers€"runtime.zerovalueÀ*type."".APIContainersÀgo.string."ID"àtype.stringðFgo.string."json:\"Id\" yaml:\"Id\"""go.string."Image"°type.stringÀzgo.string."json:\"Image,omitempty\" yaml:\"Image,omitempty\""à&go.string."Command"€type.string‚go.string."json:\"Command,omitempty\" 
yaml:\"Command,omitempty\""°&go.string."Created"Ðtype.int64à‚go.string."json:\"Created,omitempty\" yaml:\"Created,omitempty\""€$go.string."Status" type.string°~go.string."json:\"Status,omitempty\" yaml:\"Status,omitempty\""Ð"go.string."Ports"ð"type.[]"".APIPort€zgo.string."json:\"Ports,omitempty\" yaml:\"Ports,omitempty\"" $go.string."SizeRw"Àtype.int64Ð~go.string."json:\"SizeRw,omitempty\" yaml:\"SizeRw,omitempty\""ð,go.string."SizeRootFs"type.int64 Žgo.string."json:\"SizeRootFs,omitempty\" yaml:\"SizeRootFs,omitempty\""À"go.string."Names"àtype.[]stringðzgo.string."json:\"Names,omitempty\" yaml:\"Names,omitempty\""`*type."".APIContainers2go.string."APIContainers" "go.importpath."".°à*type."".APIContainersþDgo.string."[]docker.APIContainers"PN[]docker.APIContainers Dgo.string."[]docker.APIContainers"þ.type.[]"".APIContainers  Ò   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PDgo.string."[]docker.APIContainers"p@go.weak.type.*[]"".APIContainers€"runtime.zerovalue*type."".APIContainersþjgo.typelink.[]docker.APIContainers/[]"".APIContainers.type.[]"".APIContainersþ¼go.string."func(*docker.Client, docker.ListContainersOptions) ([]docker.APIContainers, error)"ÐÆRfunc(*docker.Client, docker.ListContainersOptions) ([]docker.APIContainers, error) ¼go.string."func(*docker.Client, docker.ListContainersOptions) ([]docker.APIContainers, error)"þ–type.func(*"".Client, "".ListContainersOptions) ([]"".APIContainers, error)ÀÀ¡sž“3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P¼go.string."func(*docker.Client, docker.ListContainersOptions) ([]docker.APIContainers, error)"p¨go.weak.type.*func(*"".Client, "".ListContainersOptions) ([]"".APIContainers, error)€"runtime.zerovalue €–type.func(*"".Client, "".ListContainersOptions) ([]"".APIContainers, error)Р–type.func(*"".Client, "".ListContainersOptions) ([]"".APIContainers, error)€type.*"".Client:type."".ListContainersOptions 
.type.[]"".APIContainers°type.errorþJgo.string."*docker.ListImagesOptions"`T*docker.ListImagesOptions Jgo.string."*docker.ListImagesOptions"þ4type.*"".ListImagesOptions  ã<«²6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."*docker.ListImagesOptions"pFgo.weak.type.**"".ListImagesOptions€"runtime.zerovalue2type."".ListImagesOptionsþbruntime.gcbits.0x84444800000000000000000000000000 „DHþHgo.string."docker.ListImagesOptions"`Rdocker.ListImagesOptions Hgo.string."docker.ListImagesOptions"þ&go.string."Digests"00Digests &go.string."Digests"þ:go.string."ListImagesOptions"PDListImagesOptions :go.string."ListImagesOptions"þ2type."".ListImagesOptions€€™Q Z à runtime.algarray0bruntime.gcbits.0x84444800000000000000000000000000PHgo.string."docker.ListImagesOptions"p4type.*"".ListImagesOptions€"runtime.zerovalueÀ2type."".ListImagesOptionsÀgo.string."All"àtype.bool&go.string."Filters"°0type.map[string][]stringà&go.string."Digests"€type.bool`°2type."".ListImagesOptions°:go.string."ListImagesOptions"À"go.importpath."".Ѐ2type."".ListImagesOptionsþ:go.string."*docker.APIImages"PD*docker.APIImages :go.string."*docker.APIImages"þ$type.*"".APIImages  ‰5{¶6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.APIImages"p6go.weak.type.**"".APIImages€"runtime.zerovalue"type."".APIImagesþbruntime.gcbits.0x48484444484884000000000000000000 HHDDHH„þ8go.string."docker.APIImages"PBdocker.APIImages 8go.string."docker.APIImages"þ(go.string."RepoTags"@2RepoTags (go.string."RepoTags"þ†go.string."json:\"RepoTags,omitempty\" yaml:\"RepoTags,omitempty\""ˆ3json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" †go.string."json:\"RepoTags,omitempty\" yaml:\"RepoTags,omitempty\""þ(go.string."ParentID"@2ParentID (go.string."ParentID"þ†go.string."json:\"ParentId,omitempty\" yaml:\"ParentId,omitempty\""ˆ3json:"ParentId,omitempty" yaml:"ParentId,omitempty" †go.string."json:\"ParentId,omitempty\" 
yaml:\"ParentId,omitempty\""þ.go.string."RepoDigests"@8 RepoDigests .go.string."RepoDigests"þ’go.string."json:\"RepoDigests,omitempty\" yaml:\"RepoDigests,omitempty\"" ”9json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" ’go.string."json:\"RepoDigests,omitempty\" yaml:\"RepoDigests,omitempty\""þ*go.string."APIImages"@4 APIImages *go.string."APIImages"þ"type."".APIImagesp#3[(08@PhD à runtime.algarray0bruntime.gcbits.0x48484444484884000000000000000000P8go.string."docker.APIImages"p$type.*"".APIImages€"runtime.zerovalueÀ"type."".APIImagesÀgo.string."ID"àtype.stringðFgo.string."json:\"Id\" yaml:\"Id\""(go.string."RepoTags"°type.[]stringÀ†go.string."json:\"RepoTags,omitempty\" yaml:\"RepoTags,omitempty\""à&go.string."Created"€type.int64‚go.string."json:\"Created,omitempty\" yaml:\"Created,omitempty\""° go.string."Size"Ðtype.int64àvgo.string."json:\"Size,omitempty\" yaml:\"Size,omitempty\""€.go.string."VirtualSize" type.int64°’go.string."json:\"VirtualSize,omitempty\" yaml:\"VirtualSize,omitempty\""Ð(go.string."ParentID"ðtype.string€†go.string."json:\"ParentId,omitempty\" yaml:\"ParentId,omitempty\"" .go.string."RepoDigests"Àtype.[]stringÐ’go.string."json:\"RepoDigests,omitempty\" yaml:\"RepoDigests,omitempty\""ð$go.string."Labels",type.map[string]string ~go.string."json:\"Labels,omitempty\" yaml:\"Labels,omitempty\""`À"type."".APIImagesÀ*go.string."APIImages"Ð"go.importpath."".à"type."".APIImagesþgo.string."*docker.LogsOptions"PH*docker.LogsOptions >go.string."*docker.LogsOptions"þ(type.*"".LogsOptions  ±…ÐI6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.LogsOptions"p:go.weak.type.**"".LogsOptions€"runtime.zerovalue&type."".LogsOptionsþbruntime.gcbits.0x488c8c44844400000000000000000000 HŒŒD„Dþqs:"fromImage" 8go.string."qs:\"fromImage\""þ(go.string."Registry"@2Registry (go.string."Registry"þ8go.string."PullImageOptions"PBPullImageOptions 8go.string."PullImageOptions"þ0type."".PullImageOptions  Hh¯6‹ 0@. 
:type..alg."".PullImageOptions0bruntime.gcbits.0x4848488c848484c44800000000000000PFgo.string."docker.PullImageOptions"p2type.*"".PullImageOptions€"runtime.zerovalueÀ0type."".PullImageOptionsÀ,go.string."Repository"àtype.stringð8go.string."qs:\"fromImage\""(go.string."Registry"°type.stringàgo.string."Tag"€type.string°0go.string."OutputStream"Ðtype.io.Writerà(go.string."qs:\"-\""€2go.string."RawJSONStream" type.bool°(go.string."qs:\"-\""`Ð0type."".PullImageOptionsÐ8go.string."PullImageOptions"à"go.importpath."".ð 0type."".PullImageOptionsþ²go.string."func(*docker.Client, docker.PullImageOptions, docker.AuthConfiguration) error"À¼Mfunc(*docker.Client, docker.PullImageOptions, docker.AuthConfiguration) error ²go.string."func(*docker.Client, docker.PullImageOptions, docker.AuthConfiguration) error"þŒtype.func(*"".Client, "".PullImageOptions, "".AuthConfiguration) errorÀÀMµÏ§3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P²go.string."func(*docker.Client, docker.PullImageOptions, docker.AuthConfiguration) error"pžgo.weak.type.*func(*"".Client, "".PullImageOptions, "".AuthConfiguration) error€"runtime.zerovalue €Œtype.func(*"".Client, "".PullImageOptions, "".AuthConfiguration) errorаŒtype.func(*"".Client, "".PullImageOptions, "".AuthConfiguration) error€type.*"".Client0type."".PullImageOptions 2type."".AuthConfiguration°type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·e13351f28add7c60853cb3aac0a0e34eþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þDtype..hashfunc."".PushImageOptionstype.*"".RemoveContainerOptions  MðSs6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*docker.RemoveContainerOptions"pPgo.weak.type.**"".RemoveContainerOptions€"runtime.zerovaluetype.*"".RemoveContainerOptions€"runtime.zerovalueÀtype.*"".RenameContainerOptions  Ff¹6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*docker.RenameContainerOptions"pPgo.weak.type.**"".RenameContainerOptions€"runtime.zerovaluetype.*"".RenameContainerOptions€"runtime.zerovalueÀAPIImageSearch 4go.string."APIImageSearch"þ,type."".APIImageSearch  0šÁTi(2 6type..alg."".APIImageSearch0bruntime.gcbits.0x48844400000000000000000000000000PBgo.string."docker.APIImageSearch"p.type.*"".APIImageSearch€"runtime.zerovalueÀ,type."".APIImageSearchÀ.go.string."Description"àtype.stringð’go.string."json:\"description,omitempty\" yaml:\"description,omitempty\"",go.string."IsOfficial"°type.boolÀ’go.string."json:\"is_official,omitempty\" yaml:\"is_official,omitempty\""à.go.string."IsAutomated"€type.bool–go.string."json:\"is_automated,omitempty\" yaml:\"is_automated,omitempty\""° go.string."Name"Ðtype.stringàvgo.string."json:\"name,omitempty\" yaml:\"name,omitempty\""€*go.string."StarCount" type.int°Žgo.string."json:\"star_count,omitempty\" yaml:\"star_count,omitempty\""`Ð,type."".APIImageSearchÐ4go.string."APIImageSearch"à"go.importpath."".ð ,type."".APIImageSearchþFgo.string."[]docker.APIImageSearch"PP[]docker.APIImageSearch Fgo.string."[]docker.APIImageSearch"þ0type.[]"".APIImageSearch  SƒÎÀ   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PFgo.string."[]docker.APIImageSearch"pBgo.weak.type.*[]"".APIImageSearch€"runtime.zerovalue,type."".APIImageSearchþngo.typelink.[]docker.APIImageSearch/[]"".APIImageSearch0type.[]"".APIImageSearchþ’go.string."func(*docker.Client, string) ([]docker.APIImageSearch, error)" œ=func(*docker.Client, string) ([]docker.APIImageSearch, error) ’go.string."func(*docker.Client, string) ([]docker.APIImageSearch, error)"þttype.func(*"".Client, string) ([]"".APIImageSearch, error)ÀÀ+ÖÄ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P’go.string."func(*docker.Client, string) ([]docker.APIImageSearch, error)"p†go.weak.type.*func(*"".Client, string) ([]"".APIImageSearch, 
error)€"runtime.zerovalue €ttype.func(*"".Client, string) ([]"".APIImageSearch, error)Рttype.func(*"".Client, string) ([]"".APIImageSearch, error)€type.*"".Clienttype.string 0type.[]"".APIImageSearch°type.errorþ„go.string."func(*docker.Client, string, *docker.HostConfig) error"Ž6func(*docker.Client, string, *docker.HostConfig) error „go.string."func(*docker.Client, string, *docker.HostConfig) error"þftype.func(*"".Client, string, *"".HostConfig) errorÀÀgÊÚ¿3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P„go.string."func(*docker.Client, string, *docker.HostConfig) error"pxgo.weak.type.*func(*"".Client, string, *"".HostConfig) error€"runtime.zerovalue €ftype.func(*"".Client, string, *"".HostConfig) errorаftype.func(*"".Client, string, *"".HostConfig) error€type.*"".Clienttype.string &type.*"".HostConfig°type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·e13351f28add7c60853cb3aac0a0e34eþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þDtype..hashfunc."".StartExecOptions :type..alg."".StartExecOptions0bruntime.gcbits.0xc4c8c848488c8c8c8400000000000000PFgo.string."docker.StartExecOptions"p2type.*"".StartExecOptions€"runtime.zerovalueÀ0type."".StartExecOptionsÀ$go.string."Detach"àtype.boolð~go.string."json:\"Detach,omitempty\" yaml:\"Detach,omitempty\""go.string."Tty"°type.boolÀrgo.string."json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""à.go.string."InputStream"€type.io.Reader(go.string."qs:\"-\""°0go.string."OutputStream"Ðtype.io.Writerà(go.string."qs:\"-\""€.go.string."ErrorStream" type.io.Writer°(go.string."qs:\"-\""Ð.go.string."RawTerminal"ðtype.bool€(go.string."qs:\"-\"" &go.string."Success"À&type.chan struct {}Ð,go.string."json:\"-\""`ð0type."".StartExecOptionsð8go.string."StartExecOptions"€"go.importpath."".À0type."".StartExecOptionsþŽgo.string."func(*docker.Client, string, docker.StartExecOptions) error" ˜;func(*docker.Client, string, docker.StartExecOptions) error 
Žgo.string."func(*docker.Client, string, docker.StartExecOptions) error"þptype.func(*"".Client, string, "".StartExecOptions) errorÀÀó$y3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*docker.Client, string, docker.StartExecOptions) error"p‚go.weak.type.*func(*"".Client, string, "".StartExecOptions) error€"runtime.zerovalue €ptype.func(*"".Client, string, "".StartExecOptions) errorаptype.func(*"".Client, string, "".StartExecOptions) error€type.*"".Clienttype.string 0type."".StartExecOptions°type.errorþ""..gostring.1 + +wstruct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" } ""..gostring.1þ*go.string."RxDropped"@4 RxDropped *go.string."RxDropped"þŽgo.string."json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""7json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" Žgo.string."json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""þ&go.string."RxBytes"00RxBytes &go.string."RxBytes"þ†go.string."json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""ˆ3json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" †go.string."json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""þ(go.string."RxErrors"@2RxErrors (go.string."RxErrors"þŠgo.string."json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""Œ5json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" Šgo.string."json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""þ*go.string."TxPackets"@4 
TxPackets *go.string."TxPackets"þŽgo.string."json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""7json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" Žgo.string."json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""þ*go.string."TxDropped"@4 TxDropped *go.string."TxDropped"þŽgo.string."json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""7json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" Žgo.string."json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""þ*go.string."RxPackets"@4 RxPackets *go.string."RxPackets"þŽgo.string."json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""7json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" Žgo.string."json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""þ(go.string."TxErrors"@2TxErrors (go.string."TxErrors"þŠgo.string."json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""Œ5json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" Šgo.string."json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""þ&go.string."TxBytes"00TxBytes &go.string."TxBytes"þ†go.string."json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\""ˆ3json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" †go.string."json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\""þø type.struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" }ÀÀ@/>*^™ (08<  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P""..gostring.1pŠ 
+go.weak.type.*struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" }€"runtime.zerovalueÀø type.struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" }À*go.string."RxDropped"àtype.uint64ðŽgo.string."json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""&go.string."RxBytes"°type.uint64À†go.string."json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""à(go.string."RxErrors"€type.uint64Šgo.string."json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""°*go.string."TxPackets"Ðtype.uint64àŽgo.string."json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""€*go.string."TxDropped" type.uint64°Žgo.string."json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""Ð*go.string."RxPackets"ðtype.uint64€Žgo.string."json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\"" 
(go.string."TxErrors"Àtype.uint64Њgo.string."json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""ð&go.string."TxBytes"type.uint64 †go.string."json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\""þ""..gostring.2ð'î'æ struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon 
uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } ""..gostring.2þ4go.string."TotalPgmafault"@>TotalPgmafault 4go.string."TotalPgmafault"þ¢go.string."json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""°¤Ajson:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" ¢go.string."json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""þ"go.string."Cache"0,Cache "go.string."Cache"þzgo.string."json:\"cache,omitempty\" yaml:\"cache,omitempty\""€|-json:"cache,omitempty" yaml:"cache,omitempty" zgo.string."json:\"cache,omitempty\" yaml:\"cache,omitempty\""þ,go.string."MappedFile"@6 +MappedFile ,go.string."MappedFile"þ’go.string."json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\"" ”9json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" ’go.string."json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""þ:go.string."TotalInactiveFile"PDTotalInactiveFile :go.string."TotalInactiveFile"þ²go.string."json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""À´Ijson:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" ²go.string."json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""þ&go.string."Pgpgout"00Pgpgout 
&go.string."Pgpgout"þ‚go.string."json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""„1json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" ‚go.string."json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""þgo.string."Rss"0(Rss go.string."Rss"þrgo.string."json:\"rss,omitempty\" yaml:\"rss,omitempty\""€t)json:"rss,omitempty" yaml:"rss,omitempty" rgo.string."json:\"rss,omitempty\" yaml:\"rss,omitempty\""þ6go.string."TotalMappedFile"@@TotalMappedFile 6go.string."TotalMappedFile"þªgo.string."json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""°¬Ejson:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" ªgo.string."json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""þ*go.string."Writeback"@4 Writeback *go.string."Writeback"þŠgo.string."json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""Œ5json:"writeback,omitempty" yaml:"writeback,omitempty" Šgo.string."json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""þ.go.string."Unevictable"@8 Unevictable .go.string."Unevictable"þ’go.string."json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\"" ”9json:"unevictable,omitempty" yaml:"unevictable,omitempty" ’go.string."json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""þ$go.string."Pgpgin"0.Pgpgin $go.string."Pgpgin"þ~go.string."json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""€€/json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" ~go.string."json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""þ8go.string."TotalUnevictable"PBTotalUnevictable 8go.string."TotalUnevictable"þªgo.string."json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""°¬Ejson:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" ªgo.string."json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""þ,go.string."Pgmajfault"@6 +Pgmajfault ,go.string."Pgmajfault"þŽgo.string."json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""7json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" 
Žgo.string."json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""þ(go.string."TotalRss"@2TotalRss (go.string."TotalRss"þŠgo.string."json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""Œ5json:"total_rss,omitempty" yaml:"total_rss,omitempty" Šgo.string."json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""þ0go.string."TotalRssHuge"@: TotalRssHuge 0go.string."TotalRssHuge"þžgo.string."json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""  ?json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" žgo.string."json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""þ4go.string."TotalWriteback"@>TotalWriteback 4go.string."TotalWriteback"þ¢go.string."json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""°¤Ajson:"total_writeback,omitempty" yaml:"total_writeback,omitempty" ¢go.string."json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""þ:go.string."TotalInactiveAnon"PDTotalInactiveAnon :go.string."TotalInactiveAnon"þ²go.string."json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""À´Ijson:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" ²go.string."json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""þ&go.string."RssHuge"00RssHuge &go.string."RssHuge"þ†go.string."json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""ˆ3json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" †go.string."json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""þFgo.string."HierarchicalMemoryLimit"PPHierarchicalMemoryLimit Fgo.string."HierarchicalMemoryLimit"þÊgo.string."json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""ÐÌUjson:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" Êgo.string."json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""þ0go.string."TotalPgfault"@: TotalPgfault 
0go.string."TotalPgfault"þšgo.string."json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\"" œ=json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" šgo.string."json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""þ6go.string."TotalActiveFile"@@TotalActiveFile 6go.string."TotalActiveFile"þªgo.string."json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""°¬Ejson:"total_active_file,omitempty" yaml:"total_active_file,omitempty" ªgo.string."json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""þ,go.string."ActiveAnon"@6 +ActiveAnon ,go.string."ActiveAnon"þ’go.string."json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\"" ”9json:"active_anon,omitempty" yaml:"active_anon,omitempty" ’go.string."json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""þ6go.string."TotalActiveAnon"@@TotalActiveAnon 6go.string."TotalActiveAnon"þªgo.string."json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""°¬Ejson:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" ªgo.string."json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""þ0go.string."TotalPgpgout"@: TotalPgpgout 0go.string."TotalPgpgout"þšgo.string."json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\"" œ=json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" šgo.string."json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""þ,go.string."TotalCache"@6 +TotalCache ,go.string."TotalCache"þ’go.string."json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\"" ”9json:"total_cache,omitempty" yaml:"total_cache,omitempty" ’go.string."json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""þ0go.string."InactiveAnon"@: InactiveAnon 0go.string."InactiveAnon"þšgo.string."json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\"" œ=json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" šgo.string."json:\"inactive_anon,omitempty\" 
yaml:\"inactive_anon,omitempty\""þ,go.string."ActiveFile"@6 +ActiveFile ,go.string."ActiveFile"þ’go.string."json:\"active_file,omitempty\" yaml:\"active_file,omitempty\"" ”9json:"active_file,omitempty" yaml:"active_file,omitempty" ’go.string."json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""þ&go.string."Pgfault"00Pgfault &go.string."Pgfault"þ‚go.string."json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""„1json:"pgfault,omitempty" yaml:"pgfault,omitempty" ‚go.string."json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""þ0go.string."InactiveFile"@: InactiveFile 0go.string."InactiveFile"þšgo.string."json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\"" œ=json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" šgo.string."json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""þ.go.string."TotalPgpgin"@8 TotalPgpgin .go.string."TotalPgpgin"þ–go.string."json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" ˜;json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" –go.string."json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\""þÖ'type.struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 
"json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" }ÐÐè齪Ÿ™ (08@HPX`hpx€ˆ˜ ¨°¸ÀÈÐØຠ runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P""..gostring.2pè'go.weak.type.*struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile 
uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" 
yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" }€"runtime.zerovalueÀÖ'type.struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 
"json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" }À4go.string."TotalPgmafault"àtype.uint64ð¢go.string."json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\"""go.string."Cache"°type.uint64Àzgo.string."json:\"cache,omitempty\" yaml:\"cache,omitempty\""à,go.string."MappedFile"€type.uint64’go.string."json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""°:go.string."TotalInactiveFile"Ðtype.uint64à²go.string."json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""€&go.string."Pgpgout" type.uint64°‚go.string."json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""Ðgo.string."Rss"ðtype.uint64€rgo.string."json:\"rss,omitempty\" yaml:\"rss,omitempty\"" 6go.string."TotalMappedFile"Àtype.uint64Ъgo.string."json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""ð*go.string."Writeback"type.uint64 Šgo.string."json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""À.go.string."Unevictable"àtype.uint64ð’go.string."json:\"unevictable,omitempty\" 
yaml:\"unevictable,omitempty\""$go.string."Pgpgin"°type.uint64À~go.string."json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""à8go.string."TotalUnevictable"€type.uint64ªgo.string."json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""°,go.string."Pgmajfault"Ðtype.uint64àŽgo.string."json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""€ (go.string."TotalRss"  type.uint64° Šgo.string."json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""Ð 0go.string."TotalRssHuge"ð type.uint64€ +žgo.string."json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""  +4go.string."TotalWriteback"À +type.uint64Ð +¢go.string."json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""ð +:go.string."TotalInactiveAnon" type.uint64  ²go.string."json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""À &go.string."RssHuge"à type.uint64ð †go.string."json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\"" Fgo.string."HierarchicalMemoryLimit"° type.uint64À Êgo.string."json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""à 0go.string."TotalPgfault"€ type.uint64 šgo.string."json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""° 6go.string."TotalActiveFile"Ð type.uint64à ªgo.string."json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""€,go.string."ActiveAnon" type.uint64°’go.string."json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""Ð6go.string."TotalActiveAnon"ðtype.uint64€ªgo.string."json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\"" 0go.string."TotalPgpgout"Àtype.uint64Кgo.string."json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""ð,go.string."TotalCache"type.uint64 ’go.string."json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""À0go.string."InactiveAnon"àtype.uint64ðšgo.string."json:\"inactive_anon,omitempty\" 
yaml:\"inactive_anon,omitempty\"",go.string."ActiveFile"°type.uint64À’go.string."json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""à&go.string."Pgfault"€type.uint64‚go.string."json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""°0go.string."InactiveFile"Ðtype.uint64àšgo.string."json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""€.go.string."TotalPgpgin" type.uint64°–go.string."json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\""þ""..gostring.3°-ª-D struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 
"json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" } ""..gostring.3þ"go.string."Stats"0,Stats "go.string."Stats"þzgo.string."json:\"stats,omitempty\" yaml:\"stats,omitempty\""€|-json:"stats,omitempty" yaml:"stats,omitempty" zgo.string."json:\"stats,omitempty\" yaml:\"stats,omitempty\""þ(go.string."MaxUsage"@2MaxUsage (go.string."MaxUsage"þŠgo.string."json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""Œ5json:"max_usage,omitempty" yaml:"max_usage,omitempty" Šgo.string."json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""þ"go.string."Usage"0,Usage "go.string."Usage"þzgo.string."json:\"usage,omitempty\" 
yaml:\"usage,omitempty\""€|-json:"usage,omitempty" yaml:"usage,omitempty" zgo.string."json:\"usage,omitempty\" yaml:\"usage,omitempty\""þ&go.string."Failcnt"00Failcnt &go.string."Failcnt"þ‚go.string."json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""„1json:"failcnt,omitempty" yaml:"failcnt,omitempty" ‚go.string."json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""þzgo.string."json:\"limit,omitempty\" yaml:\"limit,omitempty\""€|-json:"limit,omitempty" yaml:"limit,omitempty" zgo.string."json:\"limit,omitempty\" yaml:\"limit,omitempty\""þ’-type.struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" 
yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" }ÐШ ÌÞ™èðø*  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P""..gostring.3p¤-go.weak.type.*struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" 
yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 
"json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" }€"runtime.zerovalueÀ’-type.struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; 
HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" }À"go.string."Stats"àÖ'type.struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 
"json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" 
yaml:\"total_pgpgin,omitempty\"" }ðzgo.string."json:\"stats,omitempty\" yaml:\"stats,omitempty\""(go.string."MaxUsage"°type.uint64ÀŠgo.string."json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""à"go.string."Usage"€type.uint64zgo.string."json:\"usage,omitempty\" yaml:\"usage,omitempty\""°&go.string."Failcnt"Ðtype.uint64à‚go.string."json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""€"go.string."Limit" type.uint64°zgo.string."json:\"limit,omitempty\" yaml:\"limit,omitempty\""þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þBtype..hashfunc."".BlkioStatsEntry:type..hash."".BlkioStatsEntryþ>type..eqfunc."".BlkioStatsEntry6type..eq."".BlkioStatsEntryþ8type..alg."".BlkioStatsEntry Btype..hashfunc."".BlkioStatsEntry>type..eqfunc."".BlkioStatsEntryþFgo.string."*docker.BlkioStatsEntry"PP*docker.BlkioStatsEntry Fgo.string."*docker.BlkioStatsEntry"þ0type.*"".BlkioStatsEntry  KŽœ66   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*docker.BlkioStatsEntry"pBgo.weak.type.**"".BlkioStatsEntry€"runtime.zerovalue.type."".BlkioStatsEntryþbruntime.gcbits.0x44484484440000000000000000000000 DHD„DþDgo.string."docker.BlkioStatsEntry"PNdocker.BlkioStatsEntry Dgo.string."docker.BlkioStatsEntry"þ"go.string."Major"0,Major "go.string."Major"þzgo.string."json:\"major,omitempty\" yaml:\"major,omitempty\""€|-json:"major,omitempty" yaml:"major,omitempty" zgo.string."json:\"major,omitempty\" yaml:\"major,omitempty\""þ"go.string."Minor"0,Minor "go.string."Minor"þzgo.string."json:\"minor,omitempty\" yaml:\"minor,omitempty\""€|-json:"minor,omitempty" yaml:"minor,omitempty" zgo.string."json:\"minor,omitempty\" yaml:\"minor,omitempty\""þgo.string."Op"0&Op go.string."Op"þngo.string."json:\"op,omitempty\" yaml:\"op,omitempty\""pp'json:"op,omitempty" yaml:"op,omitempty" ngo.string."json:\"op,omitempty\" 
yaml:\"op,omitempty\""þzgo.string."json:\"value,omitempty\" yaml:\"value,omitempty\""€|-json:"value,omitempty" yaml:"value,omitempty" zgo.string."json:\"value,omitempty\" yaml:\"value,omitempty\""þ6go.string."BlkioStatsEntry"@@BlkioStatsEntry 6go.string."BlkioStatsEntry"þ.type."".BlkioStatsEntryÐÐ(Ê-Èg , 8type..alg."".BlkioStatsEntry0bruntime.gcbits.0x44484484440000000000000000000000PDgo.string."docker.BlkioStatsEntry"p0type.*"".BlkioStatsEntry€"runtime.zerovalueÀ.type."".BlkioStatsEntryÀ"go.string."Major"àtype.uint64ðzgo.string."json:\"major,omitempty\" yaml:\"major,omitempty\"""go.string."Minor"°type.uint64Àzgo.string."json:\"minor,omitempty\" yaml:\"minor,omitempty\""àgo.string."Op"€type.stringngo.string."json:\"op,omitempty\" yaml:\"op,omitempty\""°"go.string."Value"Ðtype.uint64àzgo.string."json:\"value,omitempty\" yaml:\"value,omitempty\""`€.type."".BlkioStatsEntry€6go.string."BlkioStatsEntry""go.importpath."". Ð.type."".BlkioStatsEntryþHgo.string."[]docker.BlkioStatsEntry"`R[]docker.BlkioStatsEntry Hgo.string."[]docker.BlkioStatsEntry"þ2type.[]"".BlkioStatsEntry  ò‘/#   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PHgo.string."[]docker.BlkioStatsEntry"pDgo.weak.type.*[]"".BlkioStatsEntry€"runtime.zerovalue.type."".BlkioStatsEntryþrgo.typelink.[]docker.BlkioStatsEntry/[]"".BlkioStatsEntry2type.[]"".BlkioStatsEntryþ""..gostring.4À¾struct { IOServiceBytesRecursive []docker.BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []docker.BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []docker.BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []docker.BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []docker.BlkioStatsEntry 
"json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []docker.BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []docker.BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []docker.BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" } ""..gostring.4þFgo.string."IOServiceBytesRecursive"PPIOServiceBytesRecursive Fgo.string."IOServiceBytesRecursive"þÎgo.string."json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""ÐÐWjson:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" Îgo.string."json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""þ>go.string."IOServicedRecursive"PHIOServicedRecursive >go.string."IOServicedRecursive"þºgo.string."json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""À¼Mjson:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" ºgo.string."json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""þ8go.string."IOQueueRecursive"PBIOQueueRecursive 8go.string."IOQueueRecursive"þ®go.string."json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""°°Gjson:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" ®go.string."json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""þDgo.string."IOServiceTimeRecursive"PNIOServiceTimeRecursive Dgo.string."IOServiceTimeRecursive"þÊgo.string."json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""ÐÌUjson:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" Êgo.string."json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""þ>go.string."IOWaitTimeRecursive"PHIOWaitTimeRecursive 
>go.string."IOWaitTimeRecursive"þ¾go.string."json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""ÀÀOjson:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" ¾go.string."json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""þ:go.string."IOMergedRecursive"PDIOMergedRecursive :go.string."IOMergedRecursive"þ²go.string."json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""À´Ijson:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" ²go.string."json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""þ6go.string."IOTimeRecursive"@@IOTimeRecursive 6go.string."IOTimeRecursive"þªgo.string."json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""°¬Ejson:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" ªgo.string."json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""þ8go.string."SectorsRecursive"PBSectorsRecursive 8go.string."SectorsRecursive"þªgo.string."json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\""°¬Ejson:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" ªgo.string."json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\""þætype.struct { IOServiceBytesRecursive []"".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []"".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []"".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []"".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []"".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []"".BlkioStatsEntry 
"json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []"".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []"".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" }ÀÀÀ–V›*0H`x¨< à runtime.algarray0bruntime.gcbits.0x48844448844448844448844400000000P""..gostring.4pøgo.weak.type.*struct { IOServiceBytesRecursive []"".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []"".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []"".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []"".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []"".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []"".BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []"".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []"".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" }€"runtime.zerovalueÀætype.struct { IOServiceBytesRecursive []"".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []"".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []"".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []"".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; 
IOWaitTimeRecursive []"".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []"".BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []"".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []"".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" }ÀFgo.string."IOServiceBytesRecursive"à2type.[]"".BlkioStatsEntryðÎgo.string."json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\"">go.string."IOServicedRecursive"°2type.[]"".BlkioStatsEntryÀºgo.string."json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""à8go.string."IOQueueRecursive"€2type.[]"".BlkioStatsEntry®go.string."json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""°Dgo.string."IOServiceTimeRecursive"Ð2type.[]"".BlkioStatsEntryàÊgo.string."json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""€>go.string."IOWaitTimeRecursive" 2type.[]"".BlkioStatsEntry°¾go.string."json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""Ð:go.string."IOMergedRecursive"ð2type.[]"".BlkioStatsEntry€²go.string."json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\"" 6go.string."IOTimeRecursive"À2type.[]"".BlkioStatsEntryЪgo.string."json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""ð8go.string."SectorsRecursive"2type.[]"".BlkioStatsEntry ªgo.string."json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\""þ(go.string."[]uint64"@2[]uint64 (go.string."[]uint64"þtype.[]uint64  ?µi    
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P(go.string."[]uint64"p,go.weak.type.*[]uint64€"runtime.zerovaluetype.uint64þ:go.typelink.[]uint64/[]uint64type.[]uint64þbruntime.gcbits.0x48444400000000000000000000000000 HDDþ""..gostring.5°¦‚struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" } ""..gostring.5þ.go.string."PercpuUsage"@8 PercpuUsage .go.string."PercpuUsage"þ–go.string."json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\"" ˜;json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" –go.string."json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""þ6go.string."UsageInUsermode"@@UsageInUsermode 6go.string."UsageInUsermode"þªgo.string."json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""°¬Ejson:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" ªgo.string."json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""þ,go.string."TotalUsage"@6 +TotalUsage ,go.string."TotalUsage"þ’go.string."json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\"" ”9json:"total_usage,omitempty" yaml:"total_usage,omitempty" ’go.string."json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""þ:go.string."UsageInKernelmode"PDUsageInKernelmode :go.string."UsageInKernelmode"þ²go.string."json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\""À´Ijson:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" ²go.string."json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\""þŽtype.struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 
"json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" }€€0Kù€ ($ à runtime.algarray0bruntime.gcbits.0x48444400000000000000000000000000P""..gostring.5p go.weak.type.*struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" }€"runtime.zerovalueÀŽtype.struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" }À.go.string."PercpuUsage"àtype.[]uint64ð–go.string."json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""6go.string."UsageInUsermode"°type.uint64Àªgo.string."json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""à,go.string."TotalUsage"€type.uint64’go.string."json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""°:go.string."UsageInKernelmode"Ðtype.uint64à²go.string."json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\""þ""..gostring.6‚°struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" } ""..gostring.6þ&go.string."Periods"00Periods &go.string."Periods"þLgo.string."json:\"periods,omitempty\""`Rjson:"periods,omitempty" 
Lgo.string."json:\"periods,omitempty\""þ8go.string."ThrottledPeriods"PBThrottledPeriods 8go.string."ThrottledPeriods"þ`go.string."json:\"throttled_periods,omitempty\""pf"json:"throttled_periods,omitempty" `go.string."json:\"throttled_periods,omitempty\""þ2go.string."ThrottledTime"@< ThrottledTime 2go.string."ThrottledTime"þZgo.string."json:\"throttled_time,omitempty\""``json:"throttled_time,omitempty" Zgo.string."json:\"throttled_time,omitempty\""þêtype.struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" }°°kÊÍo™  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P""..gostring.6pügo.weak.type.*struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" }€"runtime.zerovalueÀêtype.struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" }À&go.string."Periods"àtype.uint64ðLgo.string."json:\"periods,omitempty\""8go.string."ThrottledPeriods"°type.uint64À`go.string."json:\"throttled_periods,omitempty\""à2go.string."ThrottledTime"€type.uint64Zgo.string."json:\"throttled_time,omitempty\""þ8go.string."*docker.CPUStats"PB*docker.CPUStats 8go.string."*docker.CPUStats"þ"type.*"".CPUStats  ÛSÄ%6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*docker.CPUStats"p4go.weak.type.**"".CPUStats€"runtime.zerovalue type."".CPUStatsþbruntime.gcbits.0x48444444440000000000000000000000 HDDDDþ6go.string."docker.CPUStats"@@docker.CPUStats 6go.string."docker.CPUStats"þ(go.string."CPUUsage"@2CPUUsage (go.string."CPUUsage"þŠgo.string."json:\"cpu_usage,omitempty\" yaml:\"cpu_usage,omitempty\""Œ5json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" Šgo.string."json:\"cpu_usage,omitempty\" 
yaml:\"cpu_usage,omitempty\""þ4go.string."SystemCPUUsage"@>SystemCPUUsage 4go.string."SystemCPUUsage"þ¦go.string."json:\"system_cpu_usage,omitempty\" yaml:\"system_cpu_usage,omitempty\""°¨Cjson:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" ¦go.string."json:\"system_cpu_usage,omitempty\" yaml:\"system_cpu_usage,omitempty\""þ4go.string."ThrottlingData"@>ThrottlingData 4go.string."ThrottlingData"þ¢go.string."json:\"throttling_data,omitempty\" yaml:\"throttling_data,omitempty\""°¤Ajson:"throttling_data,omitempty" yaml:"throttling_data,omitempty" ¢go.string."json:\"throttling_data,omitempty\" yaml:\"throttling_data,omitempty\""þ(go.string."CPUStats"@2CPUStats (go.string."CPUStats"þ type."".CPUStats€€PF’Žõ08& à runtime.algarray0bruntime.gcbits.0x48444444440000000000000000000000P6go.string."docker.CPUStats"p"type.*"".CPUStats€"runtime.zerovalueÀ type."".CPUStatsÀ(go.string."CPUUsage"àŽtype.struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" }ðŠgo.string."json:\"cpu_usage,omitempty\" yaml:\"cpu_usage,omitempty\""4go.string."SystemCPUUsage"°type.uint64À¦go.string."json:\"system_cpu_usage,omitempty\" yaml:\"system_cpu_usage,omitempty\""à4go.string."ThrottlingData"€êtype.struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" }¢go.string."json:\"throttling_data,omitempty\" yaml:\"throttling_data,omitempty\""`° type."".CPUStats°(go.string."CPUStats"À"go.importpath."".Ѐ type."".CPUStatsþ,"type..gc."".StatsZþ*type..gcprog."".Stats22XeUUUUUUUUUU–eY–eYVUeUUþ0go.string."docker.Stats"@: docker.Stats 
0go.string."docker.Stats"þ go.string."Read"0*Read go.string."Read"þvgo.string."json:\"read,omitempty\" yaml:\"read,omitempty\""€x+json:"read,omitempty" yaml:"read,omitempty" vgo.string."json:\"read,omitempty\" yaml:\"read,omitempty\""þ‚go.string."json:\"network,omitempty\" yaml:\"network,omitempty\""„1json:"network,omitempty" yaml:"network,omitempty" ‚go.string."json:\"network,omitempty\" yaml:\"network,omitempty\""þ.go.string."MemoryStats"@8 MemoryStats .go.string."MemoryStats"þ–go.string."json:\"memory_stats,omitempty\" yaml:\"memory_stats,omitempty\"" ˜;json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" –go.string."json:\"memory_stats,omitempty\" yaml:\"memory_stats,omitempty\""þ,go.string."BlkioStats"@6 +BlkioStats ,go.string."BlkioStats"þ’go.string."json:\"blkio_stats,omitempty\" yaml:\"blkio_stats,omitempty\"" ”9json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" ’go.string."json:\"blkio_stats,omitempty\" yaml:\"blkio_stats,omitempty\""þŠgo.string."json:\"cpu_stats,omitempty\" yaml:\"cpu_stats,omitempty\""Œ5json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" Šgo.string."json:\"cpu_stats,omitempty\" yaml:\"cpu_stats,omitempty\""þ.go.string."PreCPUStats"@8 PreCPUStats .go.string."PreCPUStats"þVgo.string."json:\"precpu_stats,omitempty\""`\json:"precpu_stats,omitempty" Vgo.string."json:\"precpu_stats,omitempty\""þtype."".StatsððÀŽ9¦YX` p: à runtime.algarray0"type..gc."".Stats@*type..gcprog."".StatsP0go.string."docker.Stats"ptype.*"".Stats€"runtime.zerovalueÀtype."".StatsÀ go.string."Read"àtype.time.Timeðvgo.string."json:\"read,omitempty\" yaml:\"read,omitempty\""&go.string."Network"°ø type.struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 
"json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" }À‚go.string."json:\"network,omitempty\" yaml:\"network,omitempty\""à.go.string."MemoryStats"€’-type.struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 
"json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" }–go.string."json:\"memory_stats,omitempty\" yaml:\"memory_stats,omitempty\""°,go.string."BlkioStats"Ðætype.struct { IOServiceBytesRecursive []"".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []"".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []"".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []"".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []"".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" 
yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []"".BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []"".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []"".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" }à’go.string."json:\"blkio_stats,omitempty\" yaml:\"blkio_stats,omitempty\""€(go.string."CPUStats"  type."".CPUStats°Šgo.string."json:\"cpu_stats,omitempty\" yaml:\"cpu_stats,omitempty\""Ð.go.string."PreCPUStats"ð type."".CPUStats€Vgo.string."json:\"precpu_stats,omitempty\""` type."".Stats "go.string."Stats"°"go.importpath."".Àðtype."".Statsþ2go.string."*docker.Stats"@< *docker.Stats 2go.string."*docker.Stats"þtype.*"".Stats  ÉÂg6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."*docker.Stats"p.go.weak.type.**"".Stats€"runtime.zerovaluetype."".Statsþ@go.string."chan<- *docker.Stats"PJchan<- *docker.Stats @go.string."chan<- *docker.Stats"þ*type.chan<- *"".Stats°°®bB2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."chan<- *docker.Stats"pgo.string."docker.StatsOptions"PHdocker.StatsOptions >go.string."docker.StatsOptions"þ&go.string."Timeout"00Timeout &go.string."Timeout"þ0go.string."StatsOptions"@: StatsOptions 0go.string."StatsOptions"þ(type."".StatsOptions  0ë þÑ (( 2type..alg."".StatsOptions0bruntime.gcbits.0x48484800000000000000000000000000P>go.string."docker.StatsOptions"p*type.*"".StatsOptions€"runtime.zerovalueÀ(type."".StatsOptionsÀgo.string."ID"àtype.string"go.string."Stats"°*type.chan<- *"".Statsà$go.string."Stream"€type.bool° go.string."Done"Ð type.<-chan bool€&go.string."Timeout" $type.time.Duration`Ð(type."".StatsOptionsÐ0go.string."StatsOptions"à"go.importpath."".ð (type."".StatsOptionsþvgo.string."func(*docker.Client, docker.StatsOptions) error"€€/func(*docker.Client, docker.StatsOptions) error 
vgo.string."func(*docker.Client, docker.StatsOptions) error"þXtype.func(*"".Client, "".StatsOptions) error°°ëVA3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pvgo.string."func(*docker.Client, docker.StatsOptions) error"pjgo.weak.type.*func(*"".Client, "".StatsOptions) error€"runtime.zerovalue €Xtype.func(*"".Client, "".StatsOptions) errorРXtype.func(*"".Client, "".StatsOptions) error€type.*"".Client(type."".StatsOptions type.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þBtype..hashfunc."".TagImageOptions:type..hash."".TagImageOptionsþ>type..eqfunc."".TagImageOptions6type..eq."".TagImageOptionsþ8type..alg."".TagImageOptions Btype..hashfunc."".TagImageOptions>type..eqfunc."".TagImageOptionsþFgo.string."*docker.TagImageOptions"PP*docker.TagImageOptions Fgo.string."*docker.TagImageOptions"þ0type.*"".TagImageOptions  ¼º 6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*docker.TagImageOptions"pBgo.weak.type.**"".TagImageOptions€"runtime.zerovalue.type."".TagImageOptionsþbruntime.gcbits.0x48488484440000000000000000000000 HH„„DþDgo.string."docker.TagImageOptions"PNdocker.TagImageOptions Dgo.string."docker.TagImageOptions"þ go.string."Repo"0*Repo go.string."Repo"þ6go.string."TagImageOptions"@@TagImageOptions 6go.string."TagImageOptions"þ.type."".TagImageOptions€€(þÊ# 8type..alg."".TagImageOptions0bruntime.gcbits.0x48488484440000000000000000000000PDgo.string."docker.TagImageOptions"p0type.*"".TagImageOptions€"runtime.zerovalueÀ.type."".TagImageOptionsÀ go.string."Repo"àtype.stringgo.string."Tag"°type.stringà"go.string."Force"€type.bool`°.type."".TagImageOptions°6go.string."TagImageOptions"À"go.importpath."".Ѐ.type."".TagImageOptionsþŒgo.string."func(*docker.Client, string, docker.TagImageOptions) error" –:func(*docker.Client, string, docker.TagImageOptions) error 
Œgo.string."func(*docker.Client, string, docker.TagImageOptions) error"þntype.func(*"".Client, string, "".TagImageOptions) errorÀÀ¨,™.3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŒgo.string."func(*docker.Client, string, docker.TagImageOptions) error"p€go.weak.type.*func(*"".Client, string, "".TagImageOptions) error€"runtime.zerovalue €ntype.func(*"".Client, string, "".TagImageOptions) errorаntype.func(*"".Client, string, "".TagImageOptions) error€type.*"".Clienttype.string .type."".TagImageOptions°type.errorþ:go.string."*docker.TopResult"PD*docker.TopResult :go.string."*docker.TopResult"þ$type.*"".TopResult  æg6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.TopResult"p6go.weak.type.**"".TopResult€"runtime.zerovalue"type."".TopResultþ8go.string."docker.TopResult"PBdocker.TopResult 8go.string."docker.TopResult"þ$go.string."Titles"0.Titles $go.string."Titles"þ*go.string."Processes"@4 Processes *go.string."Processes"þ*go.string."TopResult"@4 TopResult *go.string."TopResult"þ"type."".TopResult°°0ŽOÙ à runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P8go.string."docker.TopResult"p$type.*"".TopResult€"runtime.zerovalueÀ"type."".TopResultÀ$go.string."Titles"àtype.[]string*go.string."Processes"°type.[][]string`à"type."".TopResultà*go.string."TopResult"ð"go.importpath."".€°"type."".TopResultþ”go.string."func(*docker.Client, string, string) (docker.TopResult, error)" ž>func(*docker.Client, string, string) (docker.TopResult, error) ”go.string."func(*docker.Client, string, string) (docker.TopResult, error)"þvtype.func(*"".Client, string, string) ("".TopResult, error)ÐÐ8œ ’3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P”go.string."func(*docker.Client, string, string) (docker.TopResult, error)"pˆgo.weak.type.*func(*"".Client, string, string) ("".TopResult, error)€"runtime.zerovalue €vtype.func(*"".Client, string, string) ("".TopResult, error)аvtype.func(*"".Client, 
string, string) ("".TopResult, error)€type.*"".Clienttype.string type.string°"type."".TopResultÀtype.errorþjgo.string."func(*docker.Client, string) (int, error)"€t)func(*docker.Client, string) (int, error) jgo.string."func(*docker.Client, string) (int, error)"þTtype.func(*"".Client, string) (int, error)ÀÀšö13 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*docker.Client, string) (int, error)"pfgo.weak.type.*func(*"".Client, string) (int, error)€"runtime.zerovalue €Ttype.func(*"".Client, string) (int, error)РTtype.func(*"".Client, string) (int, error)€type.*"".Clienttype.string type.int°type.errorþºgo.string."func(*docker.Client, string, map[string]string, io.Reader, io.Writer, bool) error"ÐÄQfunc(*docker.Client, string, map[string]string, io.Reader, io.Writer, bool) error ºgo.string."func(*docker.Client, string, map[string]string, io.Reader, io.Writer, bool) error"þ¤type.func(*"".Client, string, map[string]string, io.Reader, io.Writer, bool) errorðð|4“Y3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pºgo.string."func(*docker.Client, string, map[string]string, io.Reader, io.Writer, bool) error"p¶go.weak.type.*func(*"".Client, string, map[string]string, io.Reader, io.Writer, bool) error€"runtime.zerovalue €¤type.func(*"".Client, string, map[string]string, io.Reader, io.Writer, bool) errorÐà¤type.func(*"".Client, string, map[string]string, io.Reader, io.Writer, bool) error€type.*"".Clienttype.string ,type.map[string]string°type.io.ReaderÀtype.io.WriterÐtype.boolàtype.errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ6type..hashfunc."".doOptions.type..hash."".doOptionsþ2type..eqfunc."".doOptions*type..eq."".doOptionsþ,type..alg."".doOptions 6type..hashfunc."".doOptions2type..eqfunc."".doOptionsþ:go.string."*docker.doOptions"PD*docker.doOptions 
:go.string."*docker.doOptions"þ$type.*"".doOptions  ä1z6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.doOptions"p6go.weak.type.**"".doOptions€"runtime.zerovalue"type."".doOptionsþbruntime.gcbits.0xccc44c00000000000000000000000000 ÌÄLþ8go.string."docker.doOptions"PBdocker.doOptions 8go.string."docker.doOptions"þ go.string."data"0*data go.string."data"þ*go.string."forceJSON"@4 forceJSON *go.string."forceJSON"þ*go.string."doOptions"@4 doOptions *go.string."doOptions"þ"type."".doOptions°°?¦ ,type..alg."".doOptions0bruntime.gcbits.0xccc44c00000000000000000000000000P8go.string."docker.doOptions"p$type.*"".doOptions€"runtime.zerovalueÀ"type."".doOptionsÀ go.string."data"Ð"go.importpath."".à"type.interface {}*go.string."forceJSON" "go.importpath."".°type.bool`à"type."".doOptionsà*go.string."doOptions"ð"go.importpath."".€°"type."".doOptionsþ°go.string."func(*docker.Client, string, string, docker.doOptions) ([]uint8, int, error)"ÀºLfunc(*docker.Client, string, string, docker.doOptions) ([]uint8, int, error) °go.string."func(*docker.Client, string, string, docker.doOptions) ([]uint8, int, error)"þ’type.func(*"".Client, string, string, "".doOptions) ([]uint8, int, error)ðð÷Æ.3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P°go.string."func(*docker.Client, string, string, docker.doOptions) ([]uint8, int, error)"p¤go.weak.type.*func(*"".Client, string, string, "".doOptions) ([]uint8, int, error)€"runtime.zerovalue €’type.func(*"".Client, string, string, "".doOptions) ([]uint8, int, error)ÐÀ’type.func(*"".Client, string, string, "".doOptions) ([]uint8, int, error)€type.*"".Clienttype.string type.string°"type."".doOptionsÀtype.[]uint8Ðtype.intàtype.errorþ¢go.string."func(*docker.Client, int64, chan *docker.APIEvents, chan error) error"°¬Efunc(*docker.Client, int64, chan *docker.APIEvents, chan error) error ¢go.string."func(*docker.Client, int64, chan *docker.APIEvents, chan error) error"þ„type.func(*"".Client, 
int64, chan *"".APIEvents, chan error) errorÐÐç8[53 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P¢go.string."func(*docker.Client, int64, chan *docker.APIEvents, chan error) error"p–go.weak.type.*func(*"".Client, int64, chan *"".APIEvents, chan error) error€"runtime.zerovalue €„type.func(*"".Client, int64, chan *"".APIEvents, chan error) errorÐÀ„type.func(*"".Client, int64, chan *"".APIEvents, chan error) error€type.*"".Clienttype.int64 .type.chan *"".APIEvents°type.chan errorÀtype.errorþ`go.string."func(*docker.Client) (string, error)"pj$func(*docker.Client) (string, error) `go.string."func(*docker.Client) (string, error)"þJtype.func(*"".Client) (string, error)°°Ýë G3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*docker.Client) (string, error)"p\go.weak.type.*func(*"".Client) (string, error)€"runtime.zerovalue €Jtype.func(*"".Client) (string, error)ÐJtype.func(*"".Client) (string, error)€type.*"".Clienttype.string type.errorþ^go.string."func(*docker.Client, string) string"ph#func(*docker.Client, string) string ^go.string."func(*docker.Client, string) string"þHtype.func(*"".Client, string) string°°Eh´3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*docker.Client, string) string"pZgo.weak.type.*func(*"".Client, string) string€"runtime.zerovalue €Htype.func(*"".Client, string) stringРHtype.func(*"".Client, string) string€type.*"".Clienttype.string type.stringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·af3107c17ee1ab6f9f33230b5c7e3062þTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ>type..hashfunc."".hijackOptions6type..hash."".hijackOptionsþ:type..eqfunc."".hijackOptions2type..eq."".hijackOptionsþ4type..alg."".hijackOptions >type..hashfunc."".hijackOptions:type..eqfunc."".hijackOptionsþBgo.string."*docker.hijackOptions"PL*docker.hijackOptions Bgo.string."*docker.hijackOptions"þ,type.*"".hijackOptions  èªdã6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*docker.hijackOptions"p>go.weak.type.**"".hijackOptions€"runtime.zerovalue*type."".hijackOptionsþbruntime.gcbits.0x488c8c8ccc0000000000000000000000 HŒŒŒÌþ@go.string."docker.hijackOptions"PJdocker.hijackOptions @go.string."docker.hijackOptions"þ&go.string."success"00success &go.string."success"þ4go.string."setRawTerminal"@>setRawTerminal 4go.string."setRawTerminal"þgo.string."in"0&in go.string."in"þ$go.string."stdout"0.stdout $go.string."stdout"þ$go.string."stderr"0.stderr $go.string."stderr"þ2go.string."hijackOptions"@< hijackOptions 2go.string."hijackOptions"þ*type."".hijackOptionsððPÿöÌ 0@8 4type..alg."".hijackOptions0bruntime.gcbits.0x488c8c8ccc0000000000000000000000P@go.string."docker.hijackOptions"p,type.*"".hijackOptions€"runtime.zerovalueÀ*type."".hijackOptionsÀ&go.string."success"Ð"go.importpath."".à&type.chan struct {}4go.string."setRawTerminal" "go.importpath."".°type.boolàgo.string."in"ð"go.importpath."".€type.io.Reader°$go.string."stdout"À"go.importpath."".Ðtype.io.Writer€$go.string."stderr""go.importpath."". 
type.io.WriterÐ go.string."data"à"go.importpath."".ð"type.interface {}` *type."".hijackOptions 2go.string."hijackOptions"°"go.importpath."".Àð*type."".hijackOptionsþ˜go.string."func(*docker.Client, string, string, docker.hijackOptions) error"°¢@func(*docker.Client, string, string, docker.hijackOptions) error ˜go.string."func(*docker.Client, string, string, docker.hijackOptions) error"þztype.func(*"".Client, string, string, "".hijackOptions) errorÐЊ+í™3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P˜go.string."func(*docker.Client, string, string, docker.hijackOptions) error"pŒgo.weak.type.*func(*"".Client, string, string, "".hijackOptions) error€"runtime.zerovalue €ztype.func(*"".Client, string, string, "".hijackOptions) errorÐÀztype.func(*"".Client, string, string, "".hijackOptions) error€type.*"".Clienttype.string type.string°*type."".hijackOptionsÀtype.errorþBgo.string."*docker.streamOptions"PL*docker.streamOptions Bgo.string."*docker.streamOptions"þ,type.*"".streamOptions  Õ„·46   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*docker.streamOptions"p>go.weak.type.**"".streamOptions€"runtime.zerovalue*type."".streamOptionsþbruntime.gcbits.0x848c8c8c44c8c8c84800000000000000 „ŒŒŒDÈÈÈHþ@go.string."docker.streamOptions"PJdocker.streamOptions @go.string."docker.streamOptions"þ2go.string."rawJSONStream"@< rawJSONStream 2go.string."rawJSONStream"þ4go.string."useJSONDecoder"@>useJSONDecoder 4go.string."useJSONDecoder"þ&go.string."headers"00headers &go.string."headers"þ&go.string."timeout"00timeout &go.string."timeout"þ2go.string."streamOptions"@< streamOptions 2go.string."streamOptions"þ*type."".streamOptionsHµÃŠ\ 0@D à runtime.algarray0bruntime.gcbits.0x848c8c8c44c8c8c84800000000000000P@go.string."docker.streamOptions"p,type.*"".streamOptions€"runtime.zerovalueÀ*type."".streamOptionsÀ4go.string."setRawTerminal"Ð"go.importpath."".àtype.bool2go.string."rawJSONStream" 
"go.importpath."".°type.boolà4go.string."useJSONDecoder"ð"go.importpath."".€type.bool°&go.string."headers"À"go.importpath."".Ð,type.map[string]string€go.string."in""go.importpath."". type.io.ReaderÐ$go.string."stdout"à"go.importpath."".ðtype.io.Writer $go.string."stderr"°"go.importpath."".Àtype.io.Writerð&go.string."timeout"€"go.importpath."".$type.time.Duration`À*type."".streamOptionsÀ2go.string."streamOptions"Ð"go.importpath."".à*type."".streamOptionsþ˜go.string."func(*docker.Client, string, string, docker.streamOptions) error"°¢@func(*docker.Client, string, string, docker.streamOptions) error ˜go.string."func(*docker.Client, string, string, docker.streamOptions) error"þztype.func(*"".Client, string, string, "".streamOptions) errorÐÐðÅ 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P˜go.string."func(*docker.Client, string, string, docker.streamOptions) error"pŒgo.weak.type.*func(*"".Client, string, string, "".streamOptions) error€"runtime.zerovalue €ztype.func(*"".Client, string, string, "".streamOptions) errorÐÀztype.func(*"".Client, string, string, "".streamOptions) error€type.*"".Clienttype.string type.string°*type."".streamOptionsÀtype.errorþ8go.string."AddEventListener"PBAddEventListener 8go.string."AddEventListener"þ:go.string."AttachToContainer"PDAttachToContainer :go.string."AttachToContainer"þngo.string."func(docker.AttachToContainerOptions) error"€x+func(docker.AttachToContainerOptions) error ngo.string."func(docker.AttachToContainerOptions) error"þXtype.func("".AttachToContainerOptions) error   rÔ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(docker.AttachToContainerOptions) error"pjgo.weak.type.*func("".AttachToContainerOptions) error€"runtime.zerovalue €Xtype.func("".AttachToContainerOptions) errorÐXtype.func("".AttachToContainerOptions) error€@type."".AttachToContainerOptionstype.errorþ*go.string."AuthCheck"@4 AuthCheck 
*go.string."AuthCheck"þbgo.string."func(*docker.AuthConfiguration) error"pl%func(*docker.AuthConfiguration) error bgo.string."func(*docker.AuthConfiguration) error"þLtype.func(*"".AuthConfiguration) error  QKï3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."func(*docker.AuthConfiguration) error"p^go.weak.type.*func(*"".AuthConfiguration) error€"runtime.zerovalue €Ltype.func(*"".AuthConfiguration) errorÐLtype.func(*"".AuthConfiguration) error€4type.*"".AuthConfigurationtype.errorþ,go.string."BuildImage"@6 +BuildImage ,go.string."BuildImage"þ`go.string."func(docker.BuildImageOptions) error"pj$func(docker.BuildImageOptions) error `go.string."func(docker.BuildImageOptions) error"þJtype.func("".BuildImageOptions) error  ìe¿z3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(docker.BuildImageOptions) error"p\go.weak.type.*func("".BuildImageOptions) error€"runtime.zerovalue €Jtype.func("".BuildImageOptions) errorÐJtype.func("".BuildImageOptions) error€2type."".BuildImageOptionstype.errorþ6go.string."CommitContainer"@@CommitContainer 6go.string."CommitContainer"þŒgo.string."func(docker.CommitContainerOptions) (*docker.Image, error)" –:func(docker.CommitContainerOptions) (*docker.Image, error) Œgo.string."func(docker.CommitContainerOptions) (*docker.Image, error)"þntype.func("".CommitContainerOptions) (*"".Image, error)°°Žþ†%3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŒgo.string."func(docker.CommitContainerOptions) (*docker.Image, error)"p€go.weak.type.*func("".CommitContainerOptions) (*"".Image, error)€"runtime.zerovalue €ntype.func("".CommitContainerOptions) (*"".Image, error)Ðntype.func("".CommitContainerOptions) (*"".Image, error)€func(docker.CreateContainerOptions) (*docker.Container, error) ”go.string."func(docker.CreateContainerOptions) (*docker.Container, error)"þvtype.func("".CreateContainerOptions) (*"".Container, error)°°#å R3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P”go.string."func(docker.CreateContainerOptions) (*docker.Container, error)"pˆgo.weak.type.*func("".CreateContainerOptions) (*"".Container, error)€"runtime.zerovalue €vtype.func("".CreateContainerOptions) (*"".Container, error)Ðvtype.func("".CreateContainerOptions) (*"".Container, error)€ListContainers 4go.string."ListContainers"þœgo.string."func(docker.ListContainersOptions) ([]docker.APIContainers, error)"°¦Bfunc(docker.ListContainersOptions) ([]docker.APIContainers, error) œgo.string."func(docker.ListContainersOptions) ([]docker.APIContainers, error)"þ~type.func("".ListContainersOptions) ([]"".APIContainers, error)°°]æî3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pœgo.string."func(docker.ListContainersOptions) ([]docker.APIContainers, error)"pgo.weak.type.*func("".ListContainersOptions) ([]"".APIContainers, error)€"runtime.zerovalue €~type.func("".ListContainersOptions) ([]"".APIContainers, error)Ð~type.func("".ListContainersOptions) ([]"".APIContainers, error)€:type."".ListContainersOptions.type.[]"".APIContainers type.errorþ,go.string."ListImages"@6 +ListImages ,go.string."ListImages"þŒgo.string."func(docker.ListImagesOptions) ([]docker.APIImages, error)" –:func(docker.ListImagesOptions) ([]docker.APIImages, error) Œgo.string."func(docker.ListImagesOptions) ([]docker.APIImages, error)"þntype.func("".ListImagesOptions) ([]"".APIImages, error)°°‚¹8¾3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŒgo.string."func(docker.ListImagesOptions) ([]docker.APIImages, error)"p€go.weak.type.*func("".ListImagesOptions) ([]"".APIImages, error)€"runtime.zerovalue €ntype.func("".ListImagesOptions) ([]"".APIImages, error)Ðntype.func("".ListImagesOptions) ([]"".APIImages, error)€2type."".ListImagesOptions&type.[]"".APIImages type.errorþ0go.string."ListNetworks"@: ListNetworks 0go.string."ListNetworks"þXgo.string."func() ([]docker.Network, error)"pb func() 
([]docker.Network, error) Xgo.string."func() ([]docker.Network, error)"þBtype.func() ([]"".Network, error)  ¢\¡3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func() ([]docker.Network, error)"pTgo.weak.type.*func() ([]"".Network, error)€"runtime.zerovalue €Btype.func() ([]"".Network, error)ЀBtype.func() ([]"".Network, error)€"type.[]"".Networktype.errorþ*go.string."LoadImage"@4 LoadImage *go.string."LoadImage"þ^go.string."func(docker.LoadImageOptions) error"ph#func(docker.LoadImageOptions) error ^go.string."func(docker.LoadImageOptions) error"þHtype.func("".LoadImageOptions) error  é=ëV3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(docker.LoadImageOptions) error"pZgo.weak.type.*func("".LoadImageOptions) error€"runtime.zerovalue €Htype.func("".LoadImageOptions) errorÐHtype.func("".LoadImageOptions) error€0type."".LoadImageOptionstype.errorþTgo.string."func(docker.LogsOptions) error"`^func(docker.LogsOptions) error Tgo.string."func(docker.LogsOptions) error"þ>type.func("".LogsOptions) error  Fˆ“3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."func(docker.LogsOptions) error"pPgo.weak.type.*func("".LogsOptions) error€"runtime.zerovalue €>type.func("".LogsOptions) errorÐ>type.func("".LogsOptions) error€&type."".LogsOptionstype.errorþ.go.string."NetworkInfo"@8 NetworkInfo .go.string."NetworkInfo"þbgo.string."func(string) (*docker.Network, error)"pl%func(string) (*docker.Network, error) bgo.string."func(string) (*docker.Network, error)"þLtype.func(string) (*"".Network, error)°°^˜—3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."func(string) (*docker.Network, error)"p^go.weak.type.*func(string) (*"".Network, error)€"runtime.zerovalue €Ltype.func(string) (*"".Network, error)ÐLtype.func(string) (*"".Network, error)€type.string type.*"".Network type.errorþ4go.string."PauseContainer"@>PauseContainer 
4go.string."PauseContainer"þgo.string."RemoveEventListener"PHRemoveEventListener >go.string."RemoveEventListener"þ\go.string."func(chan *docker.APIEvents) error"pf"func(chan *docker.APIEvents) error \go.string."func(chan *docker.APIEvents) error"þFtype.func(chan *"".APIEvents) error  h9!ƒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(chan *docker.APIEvents) error"pXgo.weak.type.*func(chan *"".APIEvents) error€"runtime.zerovalue €Ftype.func(chan *"".APIEvents) errorÐFtype.func(chan *"".APIEvents) error€.type.chan *"".APIEventstype.errorþ.go.string."RemoveImage"@8 RemoveImage .go.string."RemoveImage"þ>go.string."RemoveImageExtended"PHRemoveImageExtended >go.string."RemoveImageExtended"þrgo.string."func(string, docker.RemoveImageOptions) error"€|-func(string, docker.RemoveImageOptions) error rgo.string."func(string, docker.RemoveImageOptions) error"þ\type.func(string, "".RemoveImageOptions) error°°¡ûr^3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Prgo.string."func(string, docker.RemoveImageOptions) error"pngo.weak.type.*func(string, "".RemoveImageOptions) error€"runtime.zerovalue €\type.func(string, "".RemoveImageOptions) errorР\type.func(string, "".RemoveImageOptions) error€type.string4type."".RemoveImageOptions type.errorþ6go.string."RenameContainer"@@RenameContainer 6go.string."RenameContainer"þjgo.string."func(docker.RenameContainerOptions) error"€t)func(docker.RenameContainerOptions) error jgo.string."func(docker.RenameContainerOptions) error"þTtype.func("".RenameContainerOptions) error  Á•íÛ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(docker.RenameContainerOptions) error"pfgo.weak.type.*func("".RenameContainerOptions) error€"runtime.zerovalue €Ttype.func("".RenameContainerOptions) errorÐTtype.func("".RenameContainerOptions) error€StartContainer 4go.string."StartContainer"þdgo.string."func(string, *docker.HostConfig) error"pn&func(string, 
*docker.HostConfig) error dgo.string."func(string, *docker.HostConfig) error"þNtype.func(string, *"".HostConfig) error°°é9™3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."func(string, *docker.HostConfig) error"p`go.weak.type.*func(string, *"".HostConfig) error€"runtime.zerovalue €Ntype.func(string, *"".HostConfig) errorРNtype.func(string, *"".HostConfig) error€type.string&type.*"".HostConfig type.errorþ*go.string."StartExec"@4 StartExec *go.string."StartExec"þngo.string."func(string, docker.StartExecOptions) error"€x+func(string, docker.StartExecOptions) error ngo.string."func(string, docker.StartExecOptions) error"þXtype.func(string, "".StartExecOptions) error°°¶œóƒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(string, docker.StartExecOptions) error"pjgo.weak.type.*func(string, "".StartExecOptions) error€"runtime.zerovalue €Xtype.func(string, "".StartExecOptions) errorРXtype.func(string, "".StartExecOptions) error€type.string0type."".StartExecOptions type.errorþVgo.string."func(docker.StatsOptions) error"``func(docker.StatsOptions) error Vgo.string."func(docker.StatsOptions) error"þ@type.func("".StatsOptions) error  ë’u +3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PVgo.string."func(docker.StatsOptions) error"pRgo.weak.type.*func("".StatsOptions) error€"runtime.zerovalue €@type.func("".StatsOptions) errorÐ@type.func("".StatsOptions) error€(type."".StatsOptionstype.errorþ2go.string."StopContainer"@< StopContainer 2go.string."StopContainer"þ(go.string."TagImage"@2TagImage (go.string."TagImage"þlgo.string."func(string, docker.TagImageOptions) error"€v*func(string, docker.TagImageOptions) error lgo.string."func(string, docker.TagImageOptions) error"þVtype.func(string, "".TagImageOptions) error°°yÓM3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(string, docker.TagImageOptions) error"phgo.weak.type.*func(string, 
"".TagImageOptions) error€"runtime.zerovalue €Vtype.func(string, "".TagImageOptions) errorРVtype.func(string, "".TagImageOptions) error€type.string.type."".TagImageOptions type.errorþ0go.string."TopContainer"@: TopContainer 0go.string."TopContainer"þtgo.string."func(string, string) (docker.TopResult, error)"€~.func(string, string) (docker.TopResult, error) tgo.string."func(string, string) (docker.TopResult, error)"þ^type.func(string, string) ("".TopResult, error)ÀÀ’¡ê¸3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(string, string) (docker.TopResult, error)"ppgo.weak.type.*func(string, string) ("".TopResult, error)€"runtime.zerovalue €^type.func(string, string) ("".TopResult, error)Р^type.func(string, string) ("".TopResult, error)€type.stringtype.string "type."".TopResult°type.errorþ8go.string."UnpauseContainer"PBUnpauseContainer 8go.string."UnpauseContainer"þ&go.string."Version"00Version &go.string."Version"þ2go.string."WaitContainer"@< WaitContainer 2go.string."WaitContainer"þJgo.string."func(string) (int, error)"`Tfunc(string) (int, error) Jgo.string."func(string) (int, error)"þtype.func("".LogsOptions) error Vtype.func(*"".Client, "".LogsOptions) error°""".(*Client).LogsÀ""".(*Client).LogsÐ.go.string."NetworkInfo"ðLtype.func(string) (*"".Network, error)€dtype.func(*"".Client, string) (*"".Network, error)0"".(*Client).NetworkInfo 0"".(*Client).NetworkInfo°4go.string."PauseContainer"Ð.type.func(string) erroràFtype.func(*"".Client, string) errorð6"".(*Client).PauseContainer€6"".(*Client).PauseContainer go.string."Ping"°"type.func() errorÀ6type.func(*"".Client) errorÐ""".(*Client).Pingà""".(*Client).Pingð*go.string."PullImage"ttype.func("".PullImageOptions, "".AuthConfiguration) error Œtype.func(*"".Client, "".PullImageOptions, "".AuthConfiguration) error°,"".(*Client).PullImageÀ,"".(*Client).PullImageÐ*go.string."PushImage"ðttype.func("".PushImageOptions, "".AuthConfiguration) error€Œtype.func(*"".Client, 
"".PushImageOptions, "".AuthConfiguration) error,"".(*Client).PushImage ,"".(*Client).PushImage°6go.string."RemoveContainer"ÐTtype.func("".RemoveContainerOptions) erroràltype.func(*"".Client, "".RemoveContainerOptions) errorð8"".(*Client).RemoveContainer€8"".(*Client).RemoveContainer>go.string."RemoveEventListener"°Ftype.func(chan *"".APIEvents) errorÀ^type.func(*"".Client, chan *"".APIEvents) errorÐ@"".(*Client).RemoveEventListenerà@"".(*Client).RemoveEventListenerð.go.string."RemoveImage".type.func(string) error Ftype.func(*"".Client, string) error°0"".(*Client).RemoveImageÀ0"".(*Client).RemoveImageÐ>go.string."RemoveImageExtended"ð\type.func(string, "".RemoveImageOptions) error€ttype.func(*"".Client, string, "".RemoveImageOptions) error@"".(*Client).RemoveImageExtended @"".(*Client).RemoveImageExtended°6go.string."RenameContainer"ÐTtype.func("".RenameContainerOptions) erroràltype.func(*"".Client, "".RenameContainerOptions) errorð8"".(*Client).RenameContainer€8"".(*Client).RenameContainer"".(*Client).ResizeContainerTTYà>"".(*Client).ResizeContainerTTYð2go.string."ResizeExecTTY"Btype.func(string, int, int) error Ztype.func(*"".Client, string, int, int) error°4"".(*Client).ResizeExecTTYÀ4"".(*Client).ResizeExecTTYÐ8go.string."RestartContainer"ð:type.func(string, uint) error€Rtype.func(*"".Client, string, uint) error:"".(*Client).RestartContainer :"".(*Client).RestartContainer°0go.string."SearchImages"Ð\type.func(string) ([]"".APIImageSearch, error)àttype.func(*"".Client, string) ([]"".APIImageSearch, error)ð2"".(*Client).SearchImages€2"".(*Client).SearchImages4go.string."StartContainer"°Ntype.func(string, *"".HostConfig) errorÀftype.func(*"".Client, string, *"".HostConfig) errorÐ6"".(*Client).StartContainerà6"".(*Client).StartContainerð*go.string."StartExec" Xtype.func(string, "".StartExecOptions) error  ptype.func(*"".Client, string, "".StartExecOptions) error° ,"".(*Client).StartExecÀ ,"".(*Client).StartExecÐ "go.string."Stats"ð @type.func("".StatsOptions) 
error€!Xtype.func(*"".Client, "".StatsOptions) error!$"".(*Client).Stats !$"".(*Client).Stats°!2go.string."StopContainer"Ð!:type.func(string, uint) errorà!Rtype.func(*"".Client, string, uint) errorð!4"".(*Client).StopContainer€"4"".(*Client).StopContainer"(go.string."TagImage"°"Vtype.func(string, "".TagImageOptions) errorÀ"ntype.func(*"".Client, string, "".TagImageOptions) errorÐ"*"".(*Client).TagImageà"*"".(*Client).TagImageð"0go.string."TopContainer"#^type.func(string, string) ("".TopResult, error) #vtype.func(*"".Client, string, string) ("".TopResult, error)°#2"".(*Client).TopContainerÀ#2"".(*Client).TopContainerÐ#8go.string."UnpauseContainer"ð#.type.func(string) error€$Ftype.func(*"".Client, string) error$:"".(*Client).UnpauseContainer $:"".(*Client).UnpauseContainer°$&go.string."Version"Ð$8type.func() (*"".Env, error)à$Ltype.func(*"".Client) (*"".Env, error)ð$("".(*Client).Version€%("".(*Client).Version%2go.string."WaitContainer"°%dockerCertPath 4go.string."dockerCertPath"þ*go.string."dockerEnv"@4 dockerEnv *go.string."dockerEnv"þ"type."".dockerEnv€€(‹A;?& ,type..alg."".dockerEnv0bruntime.gcbits.0x48848444480000000000000000000000P8go.string."docker.dockerEnv"p$type.*"".dockerEnv€"runtime.zerovalueÀ"type."".dockerEnvÀ,go.string."dockerHost"Ð"go.importpath."".àtype.string6go.string."dockerTLSVerify" "go.importpath."".°type.boolà4go.string."dockerCertPath"ð"go.importpath."".€type.string`°"type."".dockerEnv°*go.string."dockerEnv"À"go.importpath."".Ѐ"type."".dockerEnvþ:go.string."*docker.dockerEnv"PD*docker.dockerEnv :go.string."*docker.dockerEnv"þ$type.*"".dockerEnv  ^™y6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*docker.dockerEnv"p6go.weak.type.**"".dockerEnv€"runtime.zerovalue"type."".dockerEnvþ:go.string."[]tls.Certificate"PD[]tls.Certificate :go.string."[]tls.Certificate"þ:type.[]crypto/tls.Certificate  ½îBÝ   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P:go.string."[]tls.Certificate"pLgo.weak.type.*[]crypto/tls.Certificate€"runtime.zerovalue6type.crypto/tls.Certificateþlgo.typelink.[]tls.Certificate/[]crypto/tls.Certificate:type.[]crypto/tls.Certificateþbruntime.gcbits.0x48c48c448844cc488400000000000000 HÄŒDˆDÌH„þqã"   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P&go.string."[][]int"p*go.weak.type.*[][]int€"runtime.zerovaluetype.[]intþ6go.typelink.[][]int/[][]inttype.[][]intþ(go.string."[8][]int"@2[8][]int (go.string."[8][]int"þtype.[8][]intÀÀÀ̼î à runtime.algarray0bruntime.gcbits.0x48844448844448844448844400000000P(go.string."[8][]int"p,go.weak.type.*[8][]int€"runtime.zerovaluetype.[]int type.[][]intþ:go.typelink.[8][]int/[8][]inttype.[8][]intþHgo.string."*map.bucket[string][]int"`R*map.bucket[string][]int Hgo.string."*map.bucket[string][]int"þ:type.*map.bucket[string][]int  Ÿ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."*map.bucket[string][]int"pLgo.weak.type.**map.bucket[string][]int€"runtime.zerovalue8type.map.bucket[string][]intþ,@type..gc.map.bucket[string][]int,þHtype..gcprog.map.bucket[string][]int*™™™™Y–eY–e þFgo.string."map.bucket[string][]int"PPmap.bucket[string][]int Fgo.string."map.bucket[string][]int"þ8type.map.bucket[string][]int°°P< +0ºYˆH à runtime.algarray0@type..gc.map.bucket[string][]int@Htype..gcprog.map.bucket[string][]intPFgo.string."map.bucket[string][]int"pJgo.weak.type.*map.bucket[string][]int€"runtime.zerovalueÀ8type.map.bucket[string][]intÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8][]intà(go.string."overflow"€:type.*map.bucket[string][]intþ@go.string."map.hdr[string][]int"PJmap.hdr[string][]int @go.string."map.hdr[string][]int"þ2type.map.hdr[string][]intàà0ŒÄ™  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000P@go.string."map.hdr[string][]int"pDgo.weak.type.*map.hdr[string][]int€"runtime.zerovalueÀ2type.map.hdr[string][]intÀ&go.string."buckets"à:type.*map.bucket[string][]int,go.string."oldbuckets"°:type.*map.bucket[string][]intþ8go.string."map[string][]int"PBmap[string][]int 8go.string."map[string][]int"þ*type.map[string][]intÜÜ£(.³5P € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."map[string][]int"pgo.string."*[1]tls.Certificate"PH*[1]tls.Certificate >go.string."*[1]tls.Certificate"þ>type.*[1]crypto/tls.Certificate  Y +õµ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*[1]tls.Certificate"pPgo.weak.type.**[1]crypto/tls.Certificate€"runtime.zerovaluego.string."*docker.jsonMessage"PH*docker.jsonMessage >go.string."*docker.jsonMessage"þ(type.*"".jsonMessage  s°[6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.jsonMessage"p:go.weak.type.**"".jsonMessage€"runtime.zerovalue&type."".jsonMessageþDgo.string."*map.hdr[string]string"PN*map.hdr[string]string Dgo.string."*map.hdr[string]string"þ6type.*map.hdr[string]string  ºÆ¼6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."*map.hdr[string]string"pHgo.weak.type.**map.hdr[string]string€"runtime.zerovalue4type.map.hdr[string]stringþDgo.string."map.iter[string]string"PNmap.iter[string]string Dgo.string."map.iter[string]string"þ6type.map.iter[string]stringððP¹…\ (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PDgo.string."map.iter[string]string"pHgo.weak.type.*map.iter[string]string€"runtime.zerovalueÀ6type.map.iter[string]stringÀgo.string."key"àtype.*stringgo.string."val"°type.*stringàgo.string."t"€type.*uint8°go.string."h"Ð6type.*map.hdr[string]string€&go.string."buckets" go.typelink.chan bool/chan booltype.chan boolþ,go.string."*chan bool"@6 +*chan bool ,go.string."*chan bool"þtype.*chan bool  
IJË6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*chan bool"p0go.weak.type.**chan bool€"runtime.zerovaluetype.chan boolþ.go.string."*chan error"@8 *chan error .go.string."*chan error"þ type.*chan error  o ¨{6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."*chan error"p2go.weak.type.**chan error€"runtime.zerovaluetype.chan errorþ4go.string."**bufio.Reader"@>**bufio.Reader 4go.string."**bufio.Reader"þ&type.**bufio.Reader  ’ï6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."**bufio.Reader"p8go.weak.type.***bufio.Reader€"runtime.zerovalue$type.*bufio.Readerþbruntime.gcbits.0x84884888880000000000000000000000 „ˆHˆˆþØgo.string."struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"ðâ`struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader } Øgo.string."struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"þgo.string."F"0$F go.string."F"þgo.string."A0"0&A0 go.string."A0"þgo.string."A1"0&A1 go.string."A1"þgo.string."A2"0&A2 go.string."A2"þgo.string."A3"0&A3 go.string."A3"þÂtype.struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }ÐÐ(ê-¤—  runtime.algarray0bruntime.gcbits.0x84884888880000000000000000000000PØgo.string."struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"pÔgo.weak.type.*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }€"runtime.zerovalueÀÂtype.struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }Àgo.string."F"àtype.uintptrgo.string."A0"°type.*chan boolàgo.string."A1"€ type.*chan error°go.string."A2"Ð,type.*"".hijackOptions€go.string."A3" &type.**bufio.Readerþbruntime.gcbits.0x84880000000000000000000000000000 „ˆþ°go.string."struct { F uintptr; A0 *docker.hijackOptions; 
A1 *net.Conn; A2 *chan error }"ÀºLstruct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error } °go.string."struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error }"þštype.struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }€€ ÝÕÈ0  runtime.algarray0bruntime.gcbits.0x84880000000000000000000000000000P°go.string."struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error }"p¬go.weak.type.*struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }€"runtime.zerovalueÀštype.struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }Àgo.string."F"àtype.uintptrgo.string."A0"°,type.*"".hijackOptionsàgo.string."A1"€type.*net.Conn°go.string."A2"Ð type.*chan errorþÚgo.string."*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"ðäa*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader } Úgo.string."*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"þÄtype.*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }  ² ø6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PÚgo.string."*struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *docker.hijackOptions; A3 **bufio.Reader }"pÖgo.weak.type.**struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }€"runtime.zerovalueÂtype.struct { F uintptr; A0 *chan bool; A1 *chan error; A2 *"".hijackOptions; A3 **bufio.Reader }þ²go.string."*struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error }"À¼M*struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error } ²go.string."*struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error }"þœtype.*struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }  éï¥6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P²go.string."*struct { F uintptr; A0 *docker.hijackOptions; A1 *net.Conn; A2 *chan error }"p®go.weak.type.**struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }€"runtime.zerovalueštype.struct { F uintptr; A0 *"".hijackOptions; A1 *net.Conn; A2 *chan error }þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.weak.type.**[]"".PortBinding€"runtime.zerovalue*type.[]"".PortBindingþjgo.string."*map.hdr[docker.Port][]docker.PortBinding"€t)*map.hdr[docker.Port][]docker.PortBinding jgo.string."*map.hdr[docker.Port][]docker.PortBinding"þLtype.*map.hdr["".Port][]"".PortBinding  Gi0–6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."*map.hdr[docker.Port][]docker.PortBinding"p^go.weak.type.**map.hdr["".Port][]"".PortBinding€"runtime.zerovalueJtype.map.hdr["".Port][]"".PortBindingþjgo.string."map.iter[docker.Port][]docker.PortBinding"€t)map.iter[docker.Port][]docker.PortBinding jgo.string."map.iter[docker.Port][]docker.PortBinding"þLtype.map.iter["".Port][]"".PortBindingððP.àGv (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000Pjgo.string."map.iter[docker.Port][]docker.PortBinding"p^go.weak.type.*map.iter["".Port][]"".PortBinding€"runtime.zerovalueÀLtype.map.iter["".Port][]"".PortBindingÀgo.string."key"àtype.*"".Portgo.string."val"°,type.*[]"".PortBindingàgo.string."t"€type.*uint8°go.string."h"ÐLtype.*map.hdr["".Port][]"".PortBinding€&go.string."buckets" Rtype.*map.bucket["".Port][]"".PortBindingÐ go.string."bptr"ðRtype.*map.bucket["".Port][]"".PortBinding 
"go.string."other"Àtype.[4]uintptrþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·e13351f28add7c60853cb3aac0a0e34eþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þBtype..hashfunc."".NoSuchContainer:type..hash."".NoSuchContainerþ>type..eqfunc."".NoSuchContainer6type..eq."".NoSuchContainerþ8type..alg."".NoSuchContainer Btype..hashfunc."".NoSuchContainer>type..eqfunc."".NoSuchContainerþFgo.string."*docker.NoSuchContainer"PP*docker.NoSuchContainer Fgo.string."*docker.NoSuchContainer"þ`go.string."func(*docker.NoSuchContainer) string"pj$func(*docker.NoSuchContainer) string `go.string."func(*docker.NoSuchContainer) string"þJtype.func(*"".NoSuchContainer) string  g= û3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*docker.NoSuchContainer) string"p\go.weak.type.*func(*"".NoSuchContainer) string€"runtime.zerovalue €Jtype.func(*"".NoSuchContainer) stringÐJtype.func(*"".NoSuchContainer) string€0type.*"".NoSuchContainertype.stringþ0type.*"".NoSuchContainerÐÐmc0 +6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*docker.NoSuchContainer"pBgo.weak.type.**"".NoSuchContainer€"runtime.zerovalue.type."".NoSuchContainer` 0type.*"".NoSuchContainerÀð0type.*"".NoSuchContainerð"go.string."Error"$type.func() string Jtype.func(*"".NoSuchContainer) string°6"".(*NoSuchContainer).ErrorÀ6"".(*NoSuchContainer).ErrorþDgo.string."docker.NoSuchContainer"PNdocker.NoSuchContainer Dgo.string."docker.NoSuchContainer"þgo.string."Err"0(Err go.string."Err"þ6go.string."NoSuchContainer"@@NoSuchContainer 6go.string."NoSuchContainer"þ.type."".NoSuchContainer°° ¢†7p 
8type..alg."".NoSuchContainer0bruntime.gcbits.0x488c0000000000000000000000000000PDgo.string."docker.NoSuchContainer"p0type.*"".NoSuchContainer€"runtime.zerovalueÀ.type."".NoSuchContainerÀgo.string."ID"àtype.stringgo.string."Err"°type.error`à.type."".NoSuchContainerà6go.string."NoSuchContainer"ð"go.importpath."".€°.type."".NoSuchContainerþ8go.string."*[]docker.Change"PB*[]docker.Change 8go.string."*[]docker.Change"þ"type.*[]"".Change  ûx°±6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*[]docker.Change"p4go.weak.type.**[]"".Change€"runtime.zerovalue type.[]"".Changeþ""..gostring.7Žvstruct { *docker.Config; HostConfig *docker.HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" } ""..gostring.7þætype.struct { *"".Config; HostConfig *"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" }ààÆÖw À runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P""..gostring.7pøgo.weak.type.*struct { *"".Config; HostConfig *"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" }€"runtime.zerovalueÀætype.struct { *"".Config; HostConfig *"".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\"" }àtype.*"".Config,go.string."HostConfig"°&type.*"".HostConfigÀŽgo.string."json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""þVgo.string."*docker.ContainerAlreadyRunning"``*docker.ContainerAlreadyRunning Vgo.string."*docker.ContainerAlreadyRunning"þpgo.string."func(*docker.ContainerAlreadyRunning) string"€z,func(*docker.ContainerAlreadyRunning) string pgo.string."func(*docker.ContainerAlreadyRunning) string"þZtype.func(*"".ContainerAlreadyRunning) string  ….m3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."func(*docker.ContainerAlreadyRunning) string"plgo.weak.type.*func(*"".ContainerAlreadyRunning) string€"runtime.zerovalue €Ztype.func(*"".ContainerAlreadyRunning) stringÐZtype.func(*"".ContainerAlreadyRunning) 
string€@type.*"".ContainerAlreadyRunningtype.stringþ@type.*"".ContainerAlreadyRunningÐÐI#H6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PVgo.string."*docker.ContainerAlreadyRunning"pRgo.weak.type.**"".ContainerAlreadyRunning€"runtime.zerovalue>type."".ContainerAlreadyRunning` @type.*"".ContainerAlreadyRunningÀð@type.*"".ContainerAlreadyRunningð"go.string."Error"$type.func() string Ztype.func(*"".ContainerAlreadyRunning) string°F"".(*ContainerAlreadyRunning).ErrorÀF"".(*ContainerAlreadyRunning).ErrorþTgo.string."docker.ContainerAlreadyRunning"`^docker.ContainerAlreadyRunning Tgo.string."docker.ContainerAlreadyRunning"þFgo.string."ContainerAlreadyRunning"PPContainerAlreadyRunning Fgo.string."ContainerAlreadyRunning"þ>type."".ContainerAlreadyRunningààŽÙþ À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000PTgo.string."docker.ContainerAlreadyRunning"p@type.*"".ContainerAlreadyRunning€"runtime.zerovalueÀ>type."".ContainerAlreadyRunningÀgo.string."ID"àtype.string`>type."".ContainerAlreadyRunningFgo.string."ContainerAlreadyRunning" "go.importpath."".°à>type."".ContainerAlreadyRunningþNgo.string."*docker.ContainerNotRunning"`X*docker.ContainerNotRunning Ngo.string."*docker.ContainerNotRunning"þhgo.string."func(*docker.ContainerNotRunning) string"€r(func(*docker.ContainerNotRunning) string hgo.string."func(*docker.ContainerNotRunning) string"þRtype.func(*"".ContainerNotRunning) string  H R3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*docker.ContainerNotRunning) string"pdgo.weak.type.*func(*"".ContainerNotRunning) string€"runtime.zerovalue €Rtype.func(*"".ContainerNotRunning) stringÐRtype.func(*"".ContainerNotRunning) string€8type.*"".ContainerNotRunningtype.stringþ8type.*"".ContainerNotRunningÐÐDzš6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."*docker.ContainerNotRunning"pJgo.weak.type.**"".ContainerNotRunning€"runtime.zerovalue6type."".ContainerNotRunning` 8type.*"".ContainerNotRunningÀð8type.*"".ContainerNotRunningð"go.string."Error"$type.func() string Rtype.func(*"".ContainerNotRunning) string°>"".(*ContainerNotRunning).ErrorÀ>"".(*ContainerNotRunning).ErrorþLgo.string."docker.ContainerNotRunning"`Vdocker.ContainerNotRunning Lgo.string."docker.ContainerNotRunning"þ>go.string."ContainerNotRunning"PHContainerNotRunning >go.string."ContainerNotRunning"þ6type."".ContainerNotRunningàà¦nV À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000PLgo.string."docker.ContainerNotRunning"p8type.*"".ContainerNotRunning€"runtime.zerovalueÀ6type."".ContainerNotRunningÀgo.string."ID"àtype.string`6type."".ContainerNotRunning>go.string."ContainerNotRunning" "go.importpath."".°à6type."".ContainerNotRunningþ6go.string."**docker.Client"@@**docker.Client 6go.string."**docker.Client"þ type.**"".Client  lä©Î6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."**docker.Client"p2go.weak.type.***"".Client€"runtime.zerovaluetype.*"".Clientþ6go.string."**io.PipeWriter"@@**io.PipeWriter 6go.string."**io.PipeWriter"þ(type.**io.PipeWriter  8ch<6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."**io.PipeWriter"p:go.weak.type.***io.PipeWriter€"runtime.zerovalue&type.*io.PipeWriterþ""..gostring.8ðìestruct { F uintptr; A0 **docker.Client; A1 *docker.StatsOptions; A2 **io.PipeWriter; A3 *chan error } ""..gostring.8þÄtype.struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }ÐÐ(&LY+  runtime.algarray0bruntime.gcbits.0x84884888880000000000000000000000P""..gostring.8pÖgo.weak.type.*struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }€"runtime.zerovalueÀÄtype.struct { F uintptr; A0 **"".Client; A1 
*"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }Àgo.string."F"àtype.uintptrgo.string."A0"° type.**"".Clientàgo.string."A1"€*type.*"".StatsOptions°go.string."A2"Ð(type.**io.PipeWriter€go.string."A3"  type.*chan errorþ6go.string."**io.PipeReader"@@**io.PipeReader 6go.string."**io.PipeReader"þ(type.**io.PipeReader  #óûè6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."**io.PipeReader"p:go.weak.type.***io.PipeReader€"runtime.zerovalue&type.*io.PipeReaderþ6go.string."*chan struct {}"@@*chan struct {} 6go.string."*chan struct {}"þ(type.*chan struct {}  <^>ç6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*chan struct {}"p:go.weak.type.**chan struct {}€"runtime.zerovalue&type.chan struct {}þÂgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"ÐÌUstruct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} } Âgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"þ¬type.struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }€€ ªÑ•Ã  runtime.algarray0bruntime.gcbits.0x84880000000000000000000000000000PÂgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"p¾go.weak.type.*struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }€"runtime.zerovalueÀ¬type.struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }Àgo.string."F"àtype.uintptrgo.string."A0"°*type.*"".StatsOptionsàgo.string."A1"€(type.**io.PipeReader°go.string."A2"Ð(type.*chan struct {}þ4go.string."**docker.Stats"@>**docker.Stats 4go.string."**docker.Stats"þtype.**"".Stats  cÜ®£6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."**docker.Stats"p0go.weak.type.***"".Stats€"runtime.zerovaluetype.*"".StatsþÐgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 
**io.PipeReader }"àÚ\struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader } Ðgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"þºtype.struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }ÐÐ(Á¾º¬  runtime.algarray0bruntime.gcbits.0x84884888880000000000000000000000PÐgo.string."struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"pÌgo.weak.type.*struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }€"runtime.zerovalueÀºtype.struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }Àgo.string."F"àtype.uintptrgo.string."A0"°*type.*"".StatsOptionsàgo.string."A1"€ type.*chan error°go.string."A2"Ðtype.*error€go.string."A3" (type.**io.PipeReaderþÒgo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"àÜ]*struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader } Ògo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"þ¼type.*struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }  “'“6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PÒgo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }"pÎgo.weak.type.**struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }€"runtime.zerovalueºtype.struct { F uintptr; A0 *"".StatsOptions; A1 *chan error; A2 *error; A3 **io.PipeReader }þ""..gostring.9ðîf*struct { F uintptr; A0 **docker.Client; A1 *docker.StatsOptions; A2 **io.PipeWriter; A3 *chan error } ""..gostring.9þÆtype.*struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }  3tø6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P""..gostring.9pØgo.weak.type.**struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }€"runtime.zerovalueÄtype.struct { F uintptr; A0 **"".Client; A1 *"".StatsOptions; A2 **io.PipeWriter; A3 *chan error }þÄgo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"ÐÎV*struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} } Ägo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"þ®type.*struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }  Ǧü¿6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PÄgo.string."*struct { F uintptr; A0 *docker.StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }"pÀgo.weak.type.**struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }€"runtime.zerovalue¬type.struct { F uintptr; A0 *"".StatsOptions; A1 **io.PipeReader; A2 *chan struct {} }þJgo.string."struct { StatusCode int }"`Tstruct { StatusCode int } Jgo.string."struct { StatusCode int }"þ,go.string."StatusCode"@6 +StatusCode ,go.string."StatusCode"þtype.*struct { StatusCode int }  ²6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."*struct { StatusCode int }"pPgo.weak.type.**struct { StatusCode int }€"runtime.zerovaluego.typelink.[1]string/[1]stringtype.[1]stringþ,go.string."*[1]string"@6 +*[1]string ,go.string."*[1]string"þtype.*[1]string  l.!ä6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[1]string"p0go.weak.type.**[1]string€"runtime.zerovaluetype.[1]stringþ*go.string."*[]string"@4 *[]string *go.string."*[]string"þtype.*[]string  ’"v„6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P*go.string."*[]string"p.go.weak.type.**[]string€"runtime.zerovaluetype.[]stringþ2go.string."*interface {}"@< *interface {} 
2go.string."*interface {}"þ$type.*interface {}  O–6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."*interface {}"p6go.weak.type.**interface {}€"runtime.zerovalue"type.interface {}þPgo.string."*map.hdr[string]interface {}"`Z*map.hdr[string]interface {} Pgo.string."*map.hdr[string]interface {}"þBtype.*map.hdr[string]interface {}  þ ( +6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."*map.hdr[string]interface {}"pTgo.weak.type.**map.hdr[string]interface {}€"runtime.zerovalue@type.map.hdr[string]interface {}þPgo.string."map.iter[string]interface {}"`Zmap.iter[string]interface {} Pgo.string."map.iter[string]interface {}"þBtype.map.iter[string]interface {}ððPm8Ÿ (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PPgo.string."map.iter[string]interface {}"pTgo.weak.type.*map.iter[string]interface {}€"runtime.zerovalueÀBtype.map.iter[string]interface {}Àgo.string."key"àtype.*stringgo.string."val"°$type.*interface {}àgo.string."t"€type.*uint8°go.string."h"ÐBtype.*map.hdr[string]interface {}€&go.string."buckets" Htype.*map.bucket[string]interface {}Ð go.string."bptr"ðHtype.*map.bucket[string]interface {} "go.string."other"Àtype.[4]uintptrþJgo.string."*chan<- *docker.APIEvents"`T*chan<- *docker.APIEvents Jgo.string."*chan<- *docker.APIEvents"þ4type.*chan<- *"".APIEvents  ý3‡06   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."*chan<- *docker.APIEvents"pFgo.weak.type.**chan<- *"".APIEvents€"runtime.zerovalue2type.chan<- *"".APIEventsþNgo.string."*[]chan<- *docker.APIEvents"`X*[]chan<- *docker.APIEvents Ngo.string."*[]chan<- *docker.APIEvents"þ8type.*[]chan<- *"".APIEvents  ¯´Ë&6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."*[]chan<- *docker.APIEvents"pJgo.weak.type.**[]chan<- *"".APIEvents€"runtime.zerovalue6type.[]chan<- *"".APIEventsþRgo.string."**docker.eventMonitoringState"`\**docker.eventMonitoringState 
Rgo.string."**docker.eventMonitoringState"þgo.typelink.[3]*uint8/[3]*uint8type.[3]*uint8þ(go.string."[]uint16"@2[]uint16 (go.string."[]uint16"þtype.[]uint16  çŽã    runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P(go.string."[]uint16"p,go.weak.type.*[]uint16€"runtime.zerovaluetype.uint16þ:go.typelink.[]uint16/[]uint16type.[]uint16þ*go.string."[3]uint16"@4 [3]uint16 *go.string."[3]uint16"þtype.[3]uint16ÀÀßq|.‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."[3]uint16"p.go.weak.type.*[3]uint16€"runtime.zerovaluetype.uint16 type.[]uint16þ>go.typelink.[3]uint16/[3]uint16type.[3]uint16þ,ôtype..gc.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }4þütype..gcprog.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }©–i™–©þ""..gostring.13„ñstruct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 } ""..gostring.13þ"go.string."tcase"0,tcase "go.string."tcase"þ"go.string."ncase"0,ncase "go.string."ncase"þ*go.string."pollorder"@4 pollorder *go.string."pollorder"þ*go.string."lockorder"@4 lockorder *go.string."lockorder"þ"go.string."scase"0,scase "go.string."scase"þ0go.string."lockorderarr"@: lockorderarr 0go.string."lockorderarr"þ0go.string."pollorderarr"@: pollorderarr 0go.string."pollorderarr"þìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; 
releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }ððÈ‚~õPY¨À8 à runtime.algarray0ôtype..gc.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }@ütype..gcprog.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }P""..gostring.13pþgo.weak.type.*struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }€"runtime.zerovalueÀìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [3]*uint8; pollorderarr [3]uint16 }À"go.string."tcase"Ð"go.importpath."".àtype.uint16"go.string."ncase" "go.importpath."".°type.uint16à*go.string."pollorder"ð"go.importpath."".€type.*uint8°*go.string."lockorder"À"go.importpath."".Ðtype.*uint8€"go.string."scase""go.importpath."". 
êtype.[3]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }Ð0go.string."lockorderarr"à"go.importpath."".ðtype.[3]*uint8 0go.string."pollorderarr"°"go.importpath."".Àtype.[3]uint16þ®go.string."*struct { F uintptr; A0 **docker.eventMonitoringState; A1 **docker.Client }"À¸K*struct { F uintptr; A0 **docker.eventMonitoringState; A1 **docker.Client } ®go.string."*struct { F uintptr; A0 **docker.eventMonitoringState; A1 **docker.Client }"þtype.*struct { F uintptr; A0 **"".eventMonitoringState; A1 **"".Client }  §À@6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P®go.string."*struct { F uintptr; A0 **docker.eventMonitoringState; A1 **docker.Client }"p¢go.weak.type.**struct { F uintptr; A0 **"".eventMonitoringState; A1 **"".Client }€"runtime.zerovalueŽtype.struct { F uintptr; A0 **"".eventMonitoringState; A1 **"".Client }þFgo.string."*chan *docker.APIEvents"PP*chan *docker.APIEvents Fgo.string."*chan *docker.APIEvents"þ0type.*chan *"".APIEvents  ~oN(6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*chan *docker.APIEvents"pBgo.weak.type.**chan *"".APIEvents€"runtime.zerovalue.type.chan *"".APIEventsþÖgo.string."struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"àà_struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error } Ögo.string."struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"þ¸type.struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }ÐÐ(ȘÊ9  runtime.algarray0bruntime.gcbits.0x84884888880000000000000000000000PÖgo.string."struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"pÊgo.weak.type.*struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }€"runtime.zerovalueÀ¸type.struct { F uintptr; A0 *error; A1 **"".Client; A2 
*chan *"".APIEvents; A3 *chan error }Àgo.string."F"àtype.uintptrgo.string."A0"°type.*erroràgo.string."A1"€ type.**"".Client°go.string."A2"Ð0type.*chan *"".APIEvents€go.string."A3"  type.*chan errorþØgo.string."*struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"ðâ`*struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error } Øgo.string."*struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"þºtype.*struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }  ˆÒš‹6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PØgo.string."*struct { F uintptr; A0 *error; A1 **docker.Client; A2 *chan *docker.APIEvents; A3 *chan error }"pÌgo.weak.type.**struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }€"runtime.zerovalue¸type.struct { F uintptr; A0 *error; A1 **"".Client; A2 *chan *"".APIEvents; A3 *chan error }þgo.string."*[]docker.APIImages"PH*[]docker.APIImages >go.string."*[]docker.APIImages"þ(type.*[]"".APIImages  âÁ”ƒ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*[]docker.APIImages"p:go.weak.type.**[]"".APIImages€"runtime.zerovalue&type.[]"".APIImagesþDgo.string."*[]docker.ImageHistory"PN*[]docker.ImageHistory Dgo.string."*[]docker.ImageHistory"þ.type.*[]"".ImageHistory  |Ø„º6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."*[]docker.ImageHistory"p@go.weak.type.**[]"".ImageHistory€"runtime.zerovalue,type.[]"".ImageHistoryþ6type..gcprog."".ImagePre012((AfVš™U–e–e™ffeYšiþjson:"created" 8go.string."json:\"created\""þPgo.string."json:\"container,omitempty\""`Vjson:"container,omitempty" Pgo.string."json:\"container,omitempty\""þ^go.string."json:\"container_config,omitempty\""pd!json:"container_config,omitempty" 
^go.string."json:\"container_config,omitempty\""þZgo.string."json:\"docker_version,omitempty\""``json:"docker_version,omitempty" Zgo.string."json:\"docker_version,omitempty\""þJgo.string."json:\"author,omitempty\""PPjson:"author,omitempty" Jgo.string."json:\"author,omitempty\""þJgo.string."json:\"config,omitempty\""PPjson:"config,omitempty" Jgo.string."json:\"config,omitempty\""þVgo.string."json:\"architecture,omitempty\""`\json:"architecture,omitempty" Vgo.string."json:\"architecture,omitempty\""þFgo.string."json:\"size,omitempty\""PLjson:"size,omitempty" Fgo.string."json:\"size,omitempty\""þ.go.string."ImagePre012"@8 ImagePre012 .go.string."ImagePre012"þ&type."".ImagePre012€ € äÕ×äY  0HXÈØèðV à runtime.algarray@6type..gcprog."".ImagePre012Pgo.string."*docker.ImagePre012"PH*docker.ImagePre012 >go.string."*docker.ImagePre012"þ(type.*"".ImagePre012   ÅNw6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."*docker.ImagePre012"p:go.weak.type.**"".ImagePre012€"runtime.zerovalue&type."".ImagePre012þHgo.string."*[]docker.APIImageSearch"`R*[]docker.APIImageSearch Hgo.string."*[]docker.APIImageSearch"þ2type.*[]"".APIImageSearch  Sœ€Ò6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."*[]docker.APIImageSearch"pDgo.weak.type.**[]"".APIImageSearch€"runtime.zerovalue0type.[]"".APIImageSearchþ:go.string."*[]docker.Network"PD*[]docker.Network :go.string."*[]docker.Network"þ$type.*[]"".Network  Mr©6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*[]docker.Network"p6go.weak.type.**[]"".Network€"runtime.zerovalue"type.[]"".NetworkþBgo.string."*docker.NoSuchNetwork"PL*docker.NoSuchNetwork Bgo.string."*docker.NoSuchNetwork"þ\go.string."func(*docker.NoSuchNetwork) string"pf"func(*docker.NoSuchNetwork) string \go.string."func(*docker.NoSuchNetwork) string"þFtype.func(*"".NoSuchNetwork) string  «93 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*docker.NoSuchNetwork) string"pXgo.weak.type.*func(*"".NoSuchNetwork) string€"runtime.zerovalue €Ftype.func(*"".NoSuchNetwork) stringÐFtype.func(*"".NoSuchNetwork) string€,type.*"".NoSuchNetworktype.stringþ,type.*"".NoSuchNetworkÐÐç\Ÿ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*docker.NoSuchNetwork"p>go.weak.type.**"".NoSuchNetwork€"runtime.zerovalue*type."".NoSuchNetwork` ,type.*"".NoSuchNetworkÀð,type.*"".NoSuchNetworkð"go.string."Error"$type.func() string Ftype.func(*"".NoSuchNetwork) string°2"".(*NoSuchNetwork).ErrorÀ2"".(*NoSuchNetwork).Errorþ@go.string."docker.NoSuchNetwork"PJdocker.NoSuchNetwork @go.string."docker.NoSuchNetwork"þ2go.string."NoSuchNetwork"@< NoSuchNetwork 2go.string."NoSuchNetwork"þ*type."".NoSuchNetworkàà^ß`¾ À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000P@go.string."docker.NoSuchNetwork"p,type.*"".NoSuchNetwork€"runtime.zerovalueÀ*type."".NoSuchNetworkÀgo.string."ID"àtype.string`*type."".NoSuchNetwork2go.string."NoSuchNetwork" "go.importpath."".°à*type."".NoSuchNetworkþPgo.string."docker.createNetworkResponse"`Zdocker.createNetworkResponse Pgo.string."docker.createNetworkResponse"þBgo.string."createNetworkResponse"PLcreateNetworkResponse Bgo.string."createNetworkResponse"þ@type."".createNetworkResponse·1ààÙDÜÕ À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000PPgo.string."docker.createNetworkResponse"pBtype.*"".createNetworkResponse·1€"runtime.zerovalueÀ@type."".createNetworkResponse·1Àgo.string."ID"àtype.string`@type."".createNetworkResponse·1Bgo.string."createNetworkResponse" "go.importpath."".°à@type."".createNetworkResponse·1þRgo.string."*docker.createNetworkResponse"`\*docker.createNetworkResponse Rgo.string."*docker.createNetworkResponse"þBtype.*"".createNetworkResponse·1  i½#Ý6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."*docker.createNetworkResponse"pTgo.weak.type.**"".createNetworkResponse·1€"runtime.zerovalue@type."".createNetworkResponse·1þtgo.string."struct { F uintptr; A0 *string; A1 *[]string }"€~.struct { F uintptr; A0 *string; A1 *[]string } tgo.string."struct { F uintptr; A0 *string; A1 *[]string }"þftype.struct { F uintptr; A0 *string; A1 *[]string }°°K‰Ê'  runtime.algarray0bruntime.gcbits.0x84488800000000000000000000000000Ptgo.string."struct { F uintptr; A0 *string; A1 *[]string }"pxgo.weak.type.*struct { F uintptr; A0 *string; A1 *[]string }€"runtime.zerovalueÀftype.struct { F uintptr; A0 *string; A1 *[]string }Àgo.string."F"àtype.uintptrgo.string."A0"°type.*stringàgo.string."A1"€type.*[]stringþvgo.string."*struct { F uintptr; A0 *string; A1 *[]string }"€€/*struct { F uintptr; A0 *string; A1 *[]string } vgo.string."*struct { F uintptr; A0 *string; A1 *[]string }"þhtype.*struct { F uintptr; A0 *string; A1 *[]string }  —”4¾6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pvgo.string."*struct { F uintptr; A0 *string; A1 *[]string }"pzgo.weak.type.**struct { F uintptr; A0 *string; A1 *[]string }€"runtime.zerovalueftype.struct { F uintptr; A0 *string; A1 *[]string }þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·78fd77a07ab543a063c3a3049973febe þXgo.string."interface { CloseWrite() error }"pb interface { CloseWrite() error } Xgo.string."interface { CloseWrite() error }"þ,go.string."CloseWrite"@6 +CloseWrite ,go.string."CloseWrite"þJtype.interface { CloseWrite() error }ðð•Z… à runtime.algarray0bruntime.gcbits.0x8c000000000000000000000000000000PXgo.string."interface { CloseWrite() error }"p\go.weak.type.*interface { CloseWrite() error }€"runtime.zerovalueÀJtype.interface { CloseWrite() error }À,go.string."CloseWrite"à"type.func() 
errorþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.string."docker.tlsClientCon"PHdocker.tlsClientCon >go.string."docker.tlsClientCon"þ&go.string."rawConn"00rawConn &go.string."rawConn"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 +.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·40de35fb9b773b345d1ee7cba691ea13 þTgclocals·b0f264e78fa38c77ad79fe8a353279f7þTgclocals·25609300e15c97db07af80faee4d2fd6 $.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 +.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 +.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13d3af77a5bf02af6db4588efb2ea811þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·da455f41cf2a78c8890074a4a256bdd4 .þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9877a4ef732a0f966b889793f9b99b87 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0273bd9c87bb10f67d516fbf00fd7767®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 +.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·86b4418f46455e3a0eb577619691d10f –þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·be9b149192cd561578dd28b30f28e84fn þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·86b4418f46455e3a0eb577619691d10f –þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·be9b149192cd561578dd28b30f28e84fn þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·86b4418f46455e3a0eb577619691d10f 
–þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·be9b149192cd561578dd28b30f28e84fn þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·14c45952157723c8762210d9c661bf29 + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·be4f16eacaf744756abcb34364e01385®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9877a4ef732a0f966b889793f9b99b87 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0273bd9c87bb10f67d516fbf00fd7767®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 +.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9f0d5ba6770c4a1ed4fa771547e96df1 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·4e44481e9dee421443081e94ffaa0dd2®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9877a4ef732a0f966b889793f9b99b87 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0273bd9c87bb10f67d516fbf00fd7767®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·5dfce38b1d248a3900c6ec750de77702 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d93d6c9fc85d7888b8b1832756680f45.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·6a2e5ab2d393a1bfd331903fbd0fd425þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·c776d40308d3cc87dab399555a94d3ca nþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·6a2e5ab2d393a1bfd331903fbd0fd425þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·c776d40308d3cc87dab399555a94d3ca nþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·6a2e5ab2d393a1bfd331903fbd0fd425þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·c776d40308d3cc87dab399555a94d3ca nþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·57e1009a600f832f844e0e3c49ba5a89 
+.þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a99c50f5f5d34b1bf54d8ece6dad05c2&þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·adf7fd756b6e86afbfe88b4b789f56a2nBþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f35b06e445e251bd5ec01f0c98f96353&þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·7f02f8d572b489939b1d1d8614f82cdenþVgo.string."func(docker.tlsClientCon) error"``func(docker.tlsClientCon) error Vgo.string."func(docker.tlsClientCon) error"þ@type.func("".tlsClientCon) error  .ç¦3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PVgo.string."func(docker.tlsClientCon) error"pRgo.weak.type.*func("".tlsClientCon) error€"runtime.zerovalue €@type.func("".tlsClientCon) errorÐ@type.func("".tlsClientCon) error€(type."".tlsClientContype.errorþrgo.string."func(docker.tlsClientCon) tls.ConnectionState"€|-func(docker.tlsClientCon) tls.ConnectionState rgo.string."func(docker.tlsClientCon) tls.ConnectionState"þjtype.func("".tlsClientCon) crypto/tls.ConnectionState  ãd 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Prgo.string."func(docker.tlsClientCon) tls.ConnectionState"p|go.weak.type.*func("".tlsClientCon) crypto/tls.ConnectionState€"runtime.zerovalue €jtype.func("".tlsClientCon) crypto/tls.ConnectionStateÐjtype.func("".tlsClientCon) crypto/tls.ConnectionState€(type."".tlsClientCon>type.crypto/tls.ConnectionStateþ\go.string."func(docker.tlsClientCon) net.Addr"pf"func(docker.tlsClientCon) net.Addr \go.string."func(docker.tlsClientCon) net.Addr"þFtype.func("".tlsClientCon) net.Addr  eé3'3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(docker.tlsClientCon) net.Addr"pXgo.weak.type.*func("".tlsClientCon) net.Addr€"runtime.zerovalue €Ftype.func("".tlsClientCon) net.AddrÐFtype.func("".tlsClientCon) net.Addr€(type."".tlsClientContype.net.AddrþZgo.string."func(docker.tlsClientCon) []uint8"pd!func(docker.tlsClientCon) []uint8 Zgo.string."func(docker.tlsClientCon) 
[]uint8"þDtype.func("".tlsClientCon) []uint8  Ä²3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."func(docker.tlsClientCon) []uint8"pVgo.weak.type.*func("".tlsClientCon) []uint8€"runtime.zerovalue €Dtype.func("".tlsClientCon) []uint8ÐDtype.func("".tlsClientCon) []uint8€(type."".tlsClientContype.[]uint8þvgo.string."func(docker.tlsClientCon, []uint8) (int, error)"€€/func(docker.tlsClientCon, []uint8) (int, error) vgo.string."func(docker.tlsClientCon, []uint8) (int, error)"þ`type.func("".tlsClientCon, []uint8) (int, error)ÀÀ•3¬›3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pvgo.string."func(docker.tlsClientCon, []uint8) (int, error)"prgo.weak.type.*func("".tlsClientCon, []uint8) (int, error)€"runtime.zerovalue €`type.func("".tlsClientCon, []uint8) (int, error)Р`type.func("".tlsClientCon, []uint8) (int, error)€(type."".tlsClientContype.[]uint8 type.int°type.errorþlgo.string."func(docker.tlsClientCon, time.Time) error"€v*func(docker.tlsClientCon, time.Time) error lgo.string."func(docker.tlsClientCon, time.Time) error"þVtype.func("".tlsClientCon, time.Time) error°° iôÂ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(docker.tlsClientCon, time.Time) error"phgo.weak.type.*func("".tlsClientCon, time.Time) error€"runtime.zerovalue €Vtype.func("".tlsClientCon, time.Time) errorРVtype.func("".tlsClientCon, time.Time) error€(type."".tlsClientContype.time.Time type.errorþfgo.string."func(docker.tlsClientCon, string) error"pp'func(docker.tlsClientCon, string) error fgo.string."func(docker.tlsClientCon, string) error"þPtype.func("".tlsClientCon, string) error°°IÇM3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(docker.tlsClientCon, string) error"pbgo.weak.type.*func("".tlsClientCon, string) error€"runtime.zerovalue €Ptype.func("".tlsClientCon, string) errorРPtype.func("".tlsClientCon, string) error€(type."".tlsClientContype.string 
type.errorþgo.string."func(docker.tlsClientCon, []uint8) (*tls.sessionState, bool)" š<func(docker.tlsClientCon, []uint8) (*tls.sessionState, bool) go.string."func(docker.tlsClientCon, []uint8) (*tls.sessionState, bool)"þˆtype.func("".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)ÀÀÁÝêþ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pgo.string."func(docker.tlsClientCon, []uint8) (*tls.sessionState, bool)"pšgo.weak.type.*func("".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)€"runtime.zerovalue €ˆtype.func("".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)Рˆtype.func("".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)€(type."".tlsClientContype.[]uint8 :type.*crypto/tls.sessionState°type.boolþ’go.string."func(docker.tlsClientCon, *tls.sessionState) ([]uint8, error)" œ=func(docker.tlsClientCon, *tls.sessionState) ([]uint8, error) ’go.string."func(docker.tlsClientCon, *tls.sessionState) ([]uint8, error)"þŠtype.func("".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)ÀÀgÚá¹3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P’go.string."func(docker.tlsClientCon, *tls.sessionState) ([]uint8, error)"pœgo.weak.type.*func("".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)€"runtime.zerovalue €Štype.func("".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)РŠtype.func("".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)€(type."".tlsClientCon:type.*crypto/tls.sessionState type.[]uint8°type.errorþvgo.string."func(docker.tlsClientCon) (interface {}, error)"€€/func(docker.tlsClientCon) (interface {}, error) vgo.string."func(docker.tlsClientCon) (interface {}, error)"þ`type.func("".tlsClientCon) (interface {}, error)°°m#¸c3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pvgo.string."func(docker.tlsClientCon) (interface {}, error)"prgo.weak.type.*func("".tlsClientCon) (interface {}, error)€"runtime.zerovalue €`type.func("".tlsClientCon) (interface {}, 
error)Ð`type.func("".tlsClientCon) (interface {}, error)€(type."".tlsClientCon"type.interface {} type.errorþvgo.string."func(docker.tlsClientCon, tls.recordType) error"€€/func(docker.tlsClientCon, tls.recordType) error vgo.string."func(docker.tlsClientCon, tls.recordType) error"þntype.func("".tlsClientCon, crypto/tls.recordType) error°°»ÌÄJ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pvgo.string."func(docker.tlsClientCon, tls.recordType) error"p€go.weak.type.*func("".tlsClientCon, crypto/tls.recordType) error€"runtime.zerovalue €ntype.func("".tlsClientCon, crypto/tls.recordType) errorРntype.func("".tlsClientCon, crypto/tls.recordType) error€(type."".tlsClientCon4type.crypto/tls.recordType type.errorþlgo.string."func(docker.tlsClientCon, tls.alert) error"€v*func(docker.tlsClientCon, tls.alert) error lgo.string."func(docker.tlsClientCon, tls.alert) error"þdtype.func("".tlsClientCon, crypto/tls.alert) error°°Õà½3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(docker.tlsClientCon, tls.alert) error"pvgo.weak.type.*func("".tlsClientCon, crypto/tls.alert) error€"runtime.zerovalue €dtype.func("".tlsClientCon, crypto/tls.alert) errorРdtype.func("".tlsClientCon, crypto/tls.alert) error€(type."".tlsClientCon*type.crypto/tls.alert type.errorþ¸go.string."func(docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"ÐÂPfunc(docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite ¸go.string."func(docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"þ°type.func("".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteððšŸej3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P¸go.string."func(docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"pÂgo.weak.type.*func("".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€"runtime.zerovalue €°type.func("".tlsClientCon, 
uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteÐà°type.func("".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€(type."".tlsClientContype.uint16 type.[]uint16°type.uint16Àtype.boolÐtype.boolà8type.*crypto/tls.cipherSuiteþ–go.string."func(docker.tlsClientCon, tls.recordType, []uint8) (int, error)"  ?func(docker.tlsClientCon, tls.recordType, []uint8) (int, error) –go.string."func(docker.tlsClientCon, tls.recordType, []uint8) (int, error)"þŽtype.func("".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)ÐПí:3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P–go.string."func(docker.tlsClientCon, tls.recordType, []uint8) (int, error)"p go.weak.type.*func("".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)€"runtime.zerovalue €Žtype.func("".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)аŽtype.func("".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)€(type."".tlsClientCon4type.crypto/tls.recordType type.[]uint8°type.intÀtype.errorþ0go.string."tlsClientCon"@: tlsClientCon 0go.string."tlsClientCon"þ"go.string."Close"0,Close "go.string."Close"þ6go.string."ConnectionState"@@ConnectionState 6go.string."ConnectionState"þLgo.string."func() tls.ConnectionState"`Vfunc() tls.ConnectionState Lgo.string."func() tls.ConnectionState"þLtype.func() crypto/tls.ConnectionState%Å33 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func() tls.ConnectionState"p^go.weak.type.*func() crypto/tls.ConnectionState€"runtime.zerovalue €Ltype.func() crypto/tls.ConnectionStateЀLtype.func() crypto/tls.ConnectionState€>type.crypto/tls.ConnectionStateþ*go.string."Handshake"@4 Handshake *go.string."Handshake"þ*go.string."LocalAddr"@4 LocalAddr *go.string."LocalAddr"þ6go.string."func() net.Addr"@@func() net.Addr 6go.string."func() net.Addr"þ(type.func() net.AddrWHù3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."func() 
net.Addr"p:go.weak.type.*func() net.Addr€"runtime.zerovalue €(type.func() net.AddrЀ(type.func() net.Addr€type.net.Addrþ0go.string."OCSPResponse"@: OCSPResponse 0go.string."OCSPResponse"þ4go.string."func() []uint8"@>func() []uint8 4go.string."func() []uint8"þ&type.func() []uint8Þio%3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."func() []uint8"p8go.weak.type.*func() []uint8€"runtime.zerovalue €&type.func() []uint8Ѐ&type.func() []uint8€type.[]uint8þLgo.string."func([]uint8) (int, error)"`Vfunc([]uint8) (int, error) Lgo.string."func([]uint8) (int, error)"þ>type.func([]uint8) (int, error)°°„N4P3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func([]uint8) (int, error)"pPgo.weak.type.*func([]uint8) (int, error)€"runtime.zerovalue €>type.func([]uint8) (int, error)Ð>type.func([]uint8) (int, error)€type.[]uint8type.int type.errorþ,go.string."RemoteAddr"@6 +RemoteAddr ,go.string."RemoteAddr"þ.go.string."SetDeadline"@8 SetDeadline .go.string."SetDeadline"þBgo.string."func(time.Time) error"PLfunc(time.Time) error Bgo.string."func(time.Time) error"þ4type.func(time.Time) error  @ZŽ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func(time.Time) error"pFgo.weak.type.*func(time.Time) error€"runtime.zerovalue €4type.func(time.Time) errorÐ4type.func(time.Time) error€type.time.Timetype.errorþ6go.string."SetReadDeadline"@@SetReadDeadline 6go.string."SetReadDeadline"þ8go.string."SetWriteDeadline"PBSetWriteDeadline 8go.string."SetWriteDeadline"þ4go.string."VerifyHostname"@>VerifyHostname 4go.string."VerifyHostname"þ"go.string."Write"0,Write "go.string."Write"þ6go.string."clientHandshake"@@clientHandshake 6go.string."clientHandshake"þ,go.string."crypto/tls"@6 +crypto/tls ,go.string."crypto/tls"þ2go.importpath.crypto/tls. 
+ ,go.string."crypto/tls"þ2go.string."decryptTicket"@< decryptTicket 2go.string."decryptTicket"þfgo.string."func([]uint8) (*tls.sessionState, bool)"pp'func([]uint8) (*tls.sessionState, bool) fgo.string."func([]uint8) (*tls.sessionState, bool)"þftype.func([]uint8) (*crypto/tls.sessionState, bool)°°á;Oç3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func([]uint8) (*tls.sessionState, bool)"pxgo.weak.type.*func([]uint8) (*crypto/tls.sessionState, bool)€"runtime.zerovalue €ftype.func([]uint8) (*crypto/tls.sessionState, bool)Ðftype.func([]uint8) (*crypto/tls.sessionState, bool)€type.[]uint8:type.*crypto/tls.sessionState type.boolþ2go.string."encryptTicket"@< encryptTicket 2go.string."encryptTicket"þhgo.string."func(*tls.sessionState) ([]uint8, error)"€r(func(*tls.sessionState) ([]uint8, error) hgo.string."func(*tls.sessionState) ([]uint8, error)"þhtype.func(*crypto/tls.sessionState) ([]uint8, error)°°kI;h3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*tls.sessionState) ([]uint8, error)"pzgo.weak.type.*func(*crypto/tls.sessionState) ([]uint8, error)€"runtime.zerovalue €htype.func(*crypto/tls.sessionState) ([]uint8, error)Ðhtype.func(*crypto/tls.sessionState) ([]uint8, error)€:type.*crypto/tls.sessionStatetype.[]uint8 type.errorþ2go.string."readHandshake"@< readHandshake 2go.string."readHandshake"þPgo.string."func() (interface {}, error)"`Zfunc() (interface {}, error) Pgo.string."func() (interface {}, error)"þBtype.func() (interface {}, error)  jå /3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func() (interface {}, error)"pTgo.weak.type.*func() (interface {}, error)€"runtime.zerovalue €Btype.func() (interface {}, error)ЀBtype.func() (interface {}, error)€"type.interface {}type.errorþ,go.string."readRecord"@6 +readRecord ,go.string."readRecord"þLgo.string."func(tls.recordType) error"`Vfunc(tls.recordType) error Lgo.string."func(tls.recordType) 
error"þLtype.func(crypto/tls.recordType) error  w3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(tls.recordType) error"p^go.weak.type.*func(crypto/tls.recordType) error€"runtime.zerovalue €Ltype.func(crypto/tls.recordType) errorÐLtype.func(crypto/tls.recordType) error€4type.crypto/tls.recordTypetype.errorþ*go.string."sendAlert"@4 sendAlert *go.string."sendAlert"þBgo.string."func(tls.alert) error"PLfunc(tls.alert) error Bgo.string."func(tls.alert) error"þBtype.func(crypto/tls.alert) error   d˜ˆ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func(tls.alert) error"pTgo.weak.type.*func(crypto/tls.alert) error€"runtime.zerovalue €Btype.func(crypto/tls.alert) errorÐBtype.func(crypto/tls.alert) error€*type.crypto/tls.alerttype.errorþ6go.string."sendAlertLocked"@@sendAlertLocked 6go.string."sendAlertLocked"þ6go.string."serverHandshake"@@serverHandshake 6go.string."serverHandshake"þ4go.string."tryCipherSuite"@>tryCipherSuite 4go.string."tryCipherSuite"þŽgo.string."func(uint16, []uint16, uint16, bool, bool) *tls.cipherSuite" ˜;func(uint16, []uint16, uint16, bool, bool) *tls.cipherSuite Žgo.string."func(uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"þŽtype.func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteàà6~Í3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"p go.weak.type.*func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€"runtime.zerovalue €Žtype.func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteÐÐŽtype.func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€type.uint16type.[]uint16 type.uint16°type.boolÀtype.boolÐ8type.*crypto/tls.cipherSuiteþ.go.string."writeRecord"@8 writeRecord .go.string."writeRecord"þlgo.string."func(tls.recordType, []uint8) (int, error)"€v*func(tls.recordType, []uint8) (int, error) lgo.string."func(tls.recordType, 
[]uint8) (int, error)"þltype.func(crypto/tls.recordType, []uint8) (int, error)ÀÀ¥€`3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(tls.recordType, []uint8) (int, error)"p~go.weak.type.*func(crypto/tls.recordType, []uint8) (int, error)€"runtime.zerovalue €ltype.func(crypto/tls.recordType, []uint8) (int, error)Рltype.func(crypto/tls.recordType, []uint8) (int, error)€4type.crypto/tls.recordTypetype.[]uint8 type.int°type.errorþ(type."".tlsClientConðð¶¶ÇØŒ 2type..alg."".tlsClientCon0bruntime.gcbits.0xc8888c00000000000000000000000000P>go.string."docker.tlsClientCon"p*type.*"".tlsClientCon€"runtime.zerovalueÀ(type."".tlsClientConà*type.*crypto/tls.Conn&go.string."rawConn" "go.importpath."".°type.net.Conn`à(type."".tlsClientConà0go.string."tlsClientCon"ð"go.importpath."".€°(type."".tlsClientCon°"go.string."Close"Ð"type.func() errorà@type.func("".tlsClientCon) errorð0"".(*tlsClientCon).Close€*"".tlsClientCon.Close6go.string."ConnectionState"°Ltype.func() crypto/tls.ConnectionStateÀjtype.func("".tlsClientCon) crypto/tls.ConnectionStateÐD"".(*tlsClientCon).ConnectionStateà>"".tlsClientCon.ConnectionStateð*go.string."Handshake""type.func() error @type.func("".tlsClientCon) error°8"".(*tlsClientCon).HandshakeÀ2"".tlsClientCon.HandshakeÐ*go.string."LocalAddr"ð(type.func() net.Addr€Ftype.func("".tlsClientCon) net.Addr8"".(*tlsClientCon).LocalAddr 2"".tlsClientCon.LocalAddr°0go.string."OCSPResponse"Ð&type.func() []uint8àDtype.func("".tlsClientCon) []uint8ð>"".(*tlsClientCon).OCSPResponse€8"".tlsClientCon.OCSPResponse go.string."Read"°>type.func([]uint8) (int, error)À`type.func("".tlsClientCon, []uint8) (int, error)Ð."".(*tlsClientCon).Readà("".tlsClientCon.Readð,go.string."RemoteAddr"(type.func() net.Addr Ftype.func("".tlsClientCon) net.Addr°:"".(*tlsClientCon).RemoteAddrÀ4"".tlsClientCon.RemoteAddrÐ.go.string."SetDeadline"ð4type.func(time.Time) error€ Vtype.func("".tlsClientCon, time.Time) error <"".(*tlsClientCon).SetDeadline  
6"".tlsClientCon.SetDeadline° 6go.string."SetReadDeadline"Ð 4type.func(time.Time) errorà Vtype.func("".tlsClientCon, time.Time) errorð D"".(*tlsClientCon).SetReadDeadline€ +>"".tlsClientCon.SetReadDeadline +8go.string."SetWriteDeadline"° +4type.func(time.Time) errorÀ +Vtype.func("".tlsClientCon, time.Time) errorÐ +F"".(*tlsClientCon).SetWriteDeadlineà +@"".tlsClientCon.SetWriteDeadlineð +4go.string."VerifyHostname" .type.func(string) error  Ptype.func("".tlsClientCon, string) error° B"".(*tlsClientCon).VerifyHostnameÀ <"".tlsClientCon.VerifyHostnameÐ "go.string."Write"ð >type.func([]uint8) (int, error)€ `type.func("".tlsClientCon, []uint8) (int, error) 0"".(*tlsClientCon).Write  *"".tlsClientCon.Write° 6go.string."clientHandshake"À 2go.importpath.crypto/tls.Ð "type.func() errorà @type.func("".tlsClientCon) errorð Z"".(*tlsClientCon).crypto/tls.clientHandshake€ T"".tlsClientCon.crypto/tls.clientHandshake 2go.string."decryptTicket"  2go.importpath.crypto/tls.° ftype.func([]uint8) (*crypto/tls.sessionState, bool)À ˆtype.func("".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)Ð V"".(*tlsClientCon).crypto/tls.decryptTicketà P"".tlsClientCon.crypto/tls.decryptTicketð 2go.string."encryptTicket"€2go.importpath.crypto/tls.htype.func(*crypto/tls.sessionState) ([]uint8, error) Štype.func("".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)°V"".(*tlsClientCon).crypto/tls.encryptTicketÀP"".tlsClientCon.crypto/tls.encryptTicketÐ2go.string."readHandshake"à2go.importpath.crypto/tls.ðBtype.func() (interface {}, error)€`type.func("".tlsClientCon) (interface {}, error)V"".(*tlsClientCon).crypto/tls.readHandshake P"".tlsClientCon.crypto/tls.readHandshake°,go.string."readRecord"À2go.importpath.crypto/tls.ÐLtype.func(crypto/tls.recordType) erroràntype.func("".tlsClientCon, crypto/tls.recordType) errorðP"".(*tlsClientCon).crypto/tls.readRecord€J"".tlsClientCon.crypto/tls.readRecord*go.string."sendAlert" 2go.importpath.crypto/tls.°Btype.func(crypto/tls.alert) 
errorÀdtype.func("".tlsClientCon, crypto/tls.alert) errorÐN"".(*tlsClientCon).crypto/tls.sendAlertàH"".tlsClientCon.crypto/tls.sendAlertð6go.string."sendAlertLocked"€2go.importpath.crypto/tls.Btype.func(crypto/tls.alert) error dtype.func("".tlsClientCon, crypto/tls.alert) error°Z"".(*tlsClientCon).crypto/tls.sendAlertLockedÀT"".tlsClientCon.crypto/tls.sendAlertLockedÐ6go.string."serverHandshake"à2go.importpath.crypto/tls.ð"type.func() error€@type.func("".tlsClientCon) errorZ"".(*tlsClientCon).crypto/tls.serverHandshake T"".tlsClientCon.crypto/tls.serverHandshake°4go.string."tryCipherSuite"À2go.importpath.crypto/tls.ÐŽtype.func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteà°type.func("".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteðX"".(*tlsClientCon).crypto/tls.tryCipherSuite€R"".tlsClientCon.crypto/tls.tryCipherSuite.go.string."writeRecord" 2go.importpath.crypto/tls.°ltype.func(crypto/tls.recordType, []uint8) (int, error)ÀŽtype.func("".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)ÐR"".(*tlsClientCon).crypto/tls.writeRecordàL"".tlsClientCon.crypto/tls.writeRecordþ@go.string."*docker.tlsClientCon"PJ*docker.tlsClientCon @go.string."*docker.tlsClientCon"þXgo.string."func(*docker.tlsClientCon) error"pb func(*docker.tlsClientCon) error Xgo.string."func(*docker.tlsClientCon) error"þBtype.func(*"".tlsClientCon) error  SÇ}3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func(*docker.tlsClientCon) error"pTgo.weak.type.*func(*"".tlsClientCon) error€"runtime.zerovalue €Btype.func(*"".tlsClientCon) errorÐBtype.func(*"".tlsClientCon) error€*type.*"".tlsClientContype.errorþtgo.string."func(*docker.tlsClientCon) tls.ConnectionState"€~.func(*docker.tlsClientCon) tls.ConnectionState tgo.string."func(*docker.tlsClientCon) tls.ConnectionState"þltype.func(*"".tlsClientCon) crypto/tls.ConnectionState  VÌ3¶3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(*docker.tlsClientCon) tls.ConnectionState"p~go.weak.type.*func(*"".tlsClientCon) crypto/tls.ConnectionState€"runtime.zerovalue €ltype.func(*"".tlsClientCon) crypto/tls.ConnectionStateÐltype.func(*"".tlsClientCon) crypto/tls.ConnectionState€*type.*"".tlsClientCon>type.crypto/tls.ConnectionStateþ^go.string."func(*docker.tlsClientCon) net.Addr"ph#func(*docker.tlsClientCon) net.Addr ^go.string."func(*docker.tlsClientCon) net.Addr"þHtype.func(*"".tlsClientCon) net.Addr  ;é,93 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*docker.tlsClientCon) net.Addr"pZgo.weak.type.*func(*"".tlsClientCon) net.Addr€"runtime.zerovalue €Htype.func(*"".tlsClientCon) net.AddrÐHtype.func(*"".tlsClientCon) net.Addr€*type.*"".tlsClientContype.net.Addrþ\go.string."func(*docker.tlsClientCon) []uint8"pf"func(*docker.tlsClientCon) []uint8 \go.string."func(*docker.tlsClientCon) []uint8"þFtype.func(*"".tlsClientCon) []uint8  =©È3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*docker.tlsClientCon) []uint8"pXgo.weak.type.*func(*"".tlsClientCon) []uint8€"runtime.zerovalue €Ftype.func(*"".tlsClientCon) []uint8ÐFtype.func(*"".tlsClientCon) []uint8€*type.*"".tlsClientContype.[]uint8þxgo.string."func(*docker.tlsClientCon, []uint8) (int, error)"‚0func(*docker.tlsClientCon, []uint8) (int, error) xgo.string."func(*docker.tlsClientCon, []uint8) (int, error)"þbtype.func(*"".tlsClientCon, []uint8) (int, error)ÀÀ´¥j3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*docker.tlsClientCon, []uint8) (int, error)"ptgo.weak.type.*func(*"".tlsClientCon, []uint8) (int, error)€"runtime.zerovalue €btype.func(*"".tlsClientCon, []uint8) (int, error)Рbtype.func(*"".tlsClientCon, []uint8) (int, error)€*type.*"".tlsClientContype.[]uint8 type.int°type.errorþngo.string."func(*docker.tlsClientCon, time.Time) 
error"€x+func(*docker.tlsClientCon, time.Time) error ngo.string."func(*docker.tlsClientCon, time.Time) error"þXtype.func(*"".tlsClientCon, time.Time) error°°ºÙ—^3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(*docker.tlsClientCon, time.Time) error"pjgo.weak.type.*func(*"".tlsClientCon, time.Time) error€"runtime.zerovalue €Xtype.func(*"".tlsClientCon, time.Time) errorРXtype.func(*"".tlsClientCon, time.Time) error€*type.*"".tlsClientContype.time.Time type.errorþhgo.string."func(*docker.tlsClientCon, string) error"€r(func(*docker.tlsClientCon, string) error hgo.string."func(*docker.tlsClientCon, string) error"þRtype.func(*"".tlsClientCon, string) error°°øR*3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*docker.tlsClientCon, string) error"pdgo.weak.type.*func(*"".tlsClientCon, string) error€"runtime.zerovalue €Rtype.func(*"".tlsClientCon, string) errorРRtype.func(*"".tlsClientCon, string) error€*type.*"".tlsClientContype.string type.errorþ’go.string."func(*docker.tlsClientCon, []uint8) (*tls.sessionState, bool)" œ=func(*docker.tlsClientCon, []uint8) (*tls.sessionState, bool) ’go.string."func(*docker.tlsClientCon, []uint8) (*tls.sessionState, bool)"þŠtype.func(*"".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)ÀÀ{‰7x3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P’go.string."func(*docker.tlsClientCon, []uint8) (*tls.sessionState, bool)"pœgo.weak.type.*func(*"".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)€"runtime.zerovalue €Štype.func(*"".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)РŠtype.func(*"".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)€*type.*"".tlsClientContype.[]uint8 :type.*crypto/tls.sessionState°type.boolþ”go.string."func(*docker.tlsClientCon, *tls.sessionState) ([]uint8, error)" ž>func(*docker.tlsClientCon, *tls.sessionState) ([]uint8, error) ”go.string."func(*docker.tlsClientCon, *tls.sessionState) ([]uint8, 
error)"þŒtype.func(*"".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)ÀÀ`ÖéO3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P”go.string."func(*docker.tlsClientCon, *tls.sessionState) ([]uint8, error)"pžgo.weak.type.*func(*"".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)€"runtime.zerovalue €Œtype.func(*"".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)РŒtype.func(*"".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)€*type.*"".tlsClientCon:type.*crypto/tls.sessionState type.[]uint8°type.errorþxgo.string."func(*docker.tlsClientCon) (interface {}, error)"‚0func(*docker.tlsClientCon) (interface {}, error) xgo.string."func(*docker.tlsClientCon) (interface {}, error)"þbtype.func(*"".tlsClientCon) (interface {}, error)°°“Mªó3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*docker.tlsClientCon) (interface {}, error)"ptgo.weak.type.*func(*"".tlsClientCon) (interface {}, error)€"runtime.zerovalue €btype.func(*"".tlsClientCon) (interface {}, error)Ðbtype.func(*"".tlsClientCon) (interface {}, error)€*type.*"".tlsClientCon"type.interface {} type.errorþxgo.string."func(*docker.tlsClientCon, tls.recordType) error"‚0func(*docker.tlsClientCon, tls.recordType) error xgo.string."func(*docker.tlsClientCon, tls.recordType) error"þptype.func(*"".tlsClientCon, crypto/tls.recordType) error°°J´çÉ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*docker.tlsClientCon, tls.recordType) error"p‚go.weak.type.*func(*"".tlsClientCon, crypto/tls.recordType) error€"runtime.zerovalue €ptype.func(*"".tlsClientCon, crypto/tls.recordType) errorРptype.func(*"".tlsClientCon, crypto/tls.recordType) error€*type.*"".tlsClientCon4type.crypto/tls.recordType type.errorþngo.string."func(*docker.tlsClientCon, tls.alert) error"€x+func(*docker.tlsClientCon, tls.alert) error ngo.string."func(*docker.tlsClientCon, tls.alert) error"þftype.func(*"".tlsClientCon, crypto/tls.alert) 
error°°•yb.3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(*docker.tlsClientCon, tls.alert) error"pxgo.weak.type.*func(*"".tlsClientCon, crypto/tls.alert) error€"runtime.zerovalue €ftype.func(*"".tlsClientCon, crypto/tls.alert) errorРftype.func(*"".tlsClientCon, crypto/tls.alert) error€*type.*"".tlsClientCon*type.crypto/tls.alert type.errorþºgo.string."func(*docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"ÐÄQfunc(*docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite ºgo.string."func(*docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"þ²type.func(*"".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteðð¡XŒ¤3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pºgo.string."func(*docker.tlsClientCon, uint16, []uint16, uint16, bool, bool) *tls.cipherSuite"pÄgo.weak.type.*func(*"".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€"runtime.zerovalue €²type.func(*"".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteÐà²type.func(*"".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€*type.*"".tlsClientContype.uint16 type.[]uint16°type.uint16Àtype.boolÐtype.boolà8type.*crypto/tls.cipherSuiteþ˜go.string."func(*docker.tlsClientCon, tls.recordType, []uint8) (int, error)"°¢@func(*docker.tlsClientCon, tls.recordType, []uint8) (int, error) ˜go.string."func(*docker.tlsClientCon, tls.recordType, []uint8) (int, error)"þtype.func(*"".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)ÐЧBB‹3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P˜go.string."func(*docker.tlsClientCon, tls.recordType, []uint8) (int, error)"p¢go.weak.type.*func(*"".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)€"runtime.zerovalue €type.func(*"".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)аtype.func(*"".tlsClientCon, 
crypto/tls.recordType, []uint8) (int, error)€*type.*"".tlsClientCon4type.crypto/tls.recordType type.[]uint8°type.intÀtype.errorþ*type.*"".tlsClientCon&ä}á6Š   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."*docker.tlsClientCon"p"".(*tlsClientCon).OCSPResponse >"".(*tlsClientCon).OCSPResponse° go.string."Read"Ð>type.func([]uint8) (int, error)àbtype.func(*"".tlsClientCon, []uint8) (int, error)ð."".(*tlsClientCon).Read€."".(*tlsClientCon).Read,go.string."RemoteAddr"°(type.func() net.AddrÀHtype.func(*"".tlsClientCon) net.AddrÐ:"".(*tlsClientCon).RemoteAddrà:"".(*tlsClientCon).RemoteAddrð.go.string."SetDeadline"4type.func(time.Time) error Xtype.func(*"".tlsClientCon, time.Time) error°<"".(*tlsClientCon).SetDeadlineÀ<"".(*tlsClientCon).SetDeadlineÐ6go.string."SetReadDeadline"ð4type.func(time.Time) error€ Xtype.func(*"".tlsClientCon, time.Time) error D"".(*tlsClientCon).SetReadDeadline  D"".(*tlsClientCon).SetReadDeadline° 8go.string."SetWriteDeadline"Ð 4type.func(time.Time) errorà Xtype.func(*"".tlsClientCon, time.Time) errorð F"".(*tlsClientCon).SetWriteDeadline€ +F"".(*tlsClientCon).SetWriteDeadline +4go.string."VerifyHostname"° +.type.func(string) errorÀ +Rtype.func(*"".tlsClientCon, string) errorÐ +B"".(*tlsClientCon).VerifyHostnameà +B"".(*tlsClientCon).VerifyHostnameð +"go.string."Write" >type.func([]uint8) (int, error)  btype.func(*"".tlsClientCon, []uint8) (int, error)° 0"".(*tlsClientCon).WriteÀ 0"".(*tlsClientCon).WriteÐ 6go.string."clientHandshake"à 2go.importpath.crypto/tls.ð "type.func() error€ Btype.func(*"".tlsClientCon) error Z"".(*tlsClientCon).crypto/tls.clientHandshake  Z"".(*tlsClientCon).crypto/tls.clientHandshake° 2go.string."decryptTicket"À 2go.importpath.crypto/tls.Ð ftype.func([]uint8) (*crypto/tls.sessionState, bool)à Štype.func(*"".tlsClientCon, []uint8) (*crypto/tls.sessionState, bool)ð V"".(*tlsClientCon).crypto/tls.decryptTicket€ V"".(*tlsClientCon).crypto/tls.decryptTicket 2go.string."encryptTicket"  
2go.importpath.crypto/tls.° htype.func(*crypto/tls.sessionState) ([]uint8, error)À Œtype.func(*"".tlsClientCon, *crypto/tls.sessionState) ([]uint8, error)Ð V"".(*tlsClientCon).crypto/tls.encryptTicketà V"".(*tlsClientCon).crypto/tls.encryptTicketð 2go.string."readHandshake"€2go.importpath.crypto/tls.Btype.func() (interface {}, error) btype.func(*"".tlsClientCon) (interface {}, error)°V"".(*tlsClientCon).crypto/tls.readHandshakeÀV"".(*tlsClientCon).crypto/tls.readHandshakeÐ,go.string."readRecord"à2go.importpath.crypto/tls.ðLtype.func(crypto/tls.recordType) error€ptype.func(*"".tlsClientCon, crypto/tls.recordType) errorP"".(*tlsClientCon).crypto/tls.readRecord P"".(*tlsClientCon).crypto/tls.readRecord°*go.string."sendAlert"À2go.importpath.crypto/tls.ÐBtype.func(crypto/tls.alert) erroràftype.func(*"".tlsClientCon, crypto/tls.alert) errorðN"".(*tlsClientCon).crypto/tls.sendAlert€N"".(*tlsClientCon).crypto/tls.sendAlert6go.string."sendAlertLocked" 2go.importpath.crypto/tls.°Btype.func(crypto/tls.alert) errorÀftype.func(*"".tlsClientCon, crypto/tls.alert) errorÐZ"".(*tlsClientCon).crypto/tls.sendAlertLockedàZ"".(*tlsClientCon).crypto/tls.sendAlertLockedð6go.string."serverHandshake"€2go.importpath.crypto/tls."type.func() error Btype.func(*"".tlsClientCon) error°Z"".(*tlsClientCon).crypto/tls.serverHandshakeÀZ"".(*tlsClientCon).crypto/tls.serverHandshakeÐ4go.string."tryCipherSuite"à2go.importpath.crypto/tls.ðŽtype.func(uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuite€²type.func(*"".tlsClientCon, uint16, []uint16, uint16, bool, bool) *crypto/tls.cipherSuiteX"".(*tlsClientCon).crypto/tls.tryCipherSuite X"".(*tlsClientCon).crypto/tls.tryCipherSuite°.go.string."writeRecord"À2go.importpath.crypto/tls.Ðltype.func(crypto/tls.recordType, []uint8) (int, error)àtype.func(*"".tlsClientCon, crypto/tls.recordType, []uint8) (int, error)ðR"".(*tlsClientCon).crypto/tls.writeRecord€R"".(*tlsClientCon).crypto/tls.writeRecordþbruntime.gcbits.0x84000000000000000000000000000000 
„þ`go.string."struct { F uintptr; A0 *chan error }"pj$struct { F uintptr; A0 *chan error } `go.string."struct { F uintptr; A0 *chan error }"þRtype.struct { F uintptr; A0 *chan error }àà|Y"Å À runtime.algarray0bruntime.gcbits.0x84000000000000000000000000000000P`go.string."struct { F uintptr; A0 *chan error }"pdgo.weak.type.*struct { F uintptr; A0 *chan error }€"runtime.zerovalueÀRtype.struct { F uintptr; A0 *chan error }Àgo.string."F"àtype.uintptrgo.string."A0"° type.*chan errorþ,go.string."**tls.Conn"@6 +**tls.Conn ,go.string."**tls.Conn"þ,type.**crypto/tls.Conn  ÏÈ]6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."**tls.Conn"p>go.weak.type.***crypto/tls.Conn€"runtime.zerovalue*type.*crypto/tls.Connþ~go.string."struct { F uintptr; A0 *chan error; A1 **tls.Conn }"ˆ3struct { F uintptr; A0 *chan error; A1 **tls.Conn } ~go.string."struct { F uintptr; A0 *chan error; A1 **tls.Conn }"þ~type.struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }°°ù%F  runtime.algarray0bruntime.gcbits.0x84488800000000000000000000000000P~go.string."struct { F uintptr; A0 *chan error; A1 **tls.Conn }"pgo.weak.type.*struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }€"runtime.zerovalueÀ~type.struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }Àgo.string."F"àtype.uintptrgo.string."A0"° type.*chan erroràgo.string."A1"€,type.**crypto/tls.Connþbgo.string."*struct { F uintptr; A0 *chan error }"pl%*struct { F uintptr; A0 *chan error } bgo.string."*struct { F uintptr; A0 *chan error }"þTtype.*struct { F uintptr; A0 *chan error }  µt6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."*struct { F uintptr; A0 *chan error }"pfgo.weak.type.**struct { F uintptr; A0 *chan error }€"runtime.zerovalueRtype.struct { F uintptr; A0 *chan error }þ€go.string."*struct { F uintptr; A0 *chan error; A1 **tls.Conn }"Š4*struct { F uintptr; A0 *chan error; A1 **tls.Conn } €go.string."*struct { F uintptr; A0 *chan error; A1 **tls.Conn 
}"þ€type.*struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }  í‰*ü6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P€go.string."*struct { F uintptr; A0 *chan error; A1 **tls.Conn }"p’go.weak.type.**struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }€"runtime.zerovalue~type.struct { F uintptr; A0 *chan error; A1 **crypto/tls.Conn }þbruntime.gcbits.0x88444888444800000000000000000000 ˆDHˆDHþ""..gostring.14‚p[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 } ""..gostring.14þêtype.[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }ÀÀ`ÿã*ƒ à runtime.algarray0bruntime.gcbits.0x88444888444800000000000000000000P""..gostring.14pügo.weak.type.*[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }€"runtime.zerovalueätype.struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 } ètype.[]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }þÚgo.typelink.[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }/[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }êtype.[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }þ*go.string."[2]*uint8"@4 [2]*uint8 *go.string."[2]*uint8"þtype.[2]*uint8ÀÀ¡ðV À runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P*go.string."[2]*uint8"p.go.weak.type.*[2]*uint8€"runtime.zerovaluetype.*uint8 type.[]*uint8þ>go.typelink.[2]*uint8/[2]*uint8type.[2]*uint8þ*go.string."[2]uint16"@4 [2]uint16 *go.string."[2]uint16"þtype.[2]uint16ÀÀ ÎUI‘ € 
runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."[2]uint16"p.go.weak.type.*[2]uint16€"runtime.zerovaluetype.uint16 type.[]uint16þ>go.typelink.[2]uint16/[2]uint16type.[2]uint16þbruntime.gcbits.0x84884884844884844800000000000000 „ˆH„„H„„Hþ""..gostring.15„ñstruct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [2]*uint8; pollorderarr [2]uint16 } ""..gostring.15þìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [2]*uint8; pollorderarr [2]uint16 }ðð©Ûxáxˆ6 à runtime.algarray0bruntime.gcbits.0x84884884844884844800000000000000P""..gostring.15pþgo.weak.type.*struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [2]*uint8; pollorderarr [2]uint16 }€"runtime.zerovalueÀìtype.struct { tcase uint16; ncase uint16; pollorder *uint8; lockorder *uint8; scase [2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }; lockorderarr [2]*uint8; pollorderarr [2]uint16 }À"go.string."tcase"Ð"go.importpath."".àtype.uint16"go.string."ncase" "go.importpath."".°type.uint16à*go.string."pollorder"ð"go.importpath."".€type.*uint8°*go.string."lockorder"À"go.importpath."".Ðtype.*uint8€"go.string."scase""go.importpath."". 
êtype.[2]struct { elem *uint8; chan *uint8; pc uintptr; kind uint16; so uint16; receivedp *uint8; releasetime uint64 }Ð0go.string."lockorderarr"à"go.importpath."".ðtype.[2]*uint8 0go.string."pollorderarr"°"go.importpath."".Àtype.[2]uint16þ,go.string."*[8]string"@6 +*[8]string ,go.string."*[8]string"þtype.*[8]string  ­”o6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[8]string"p0go.weak.type.**[8]string€"runtime.zerovaluetype.[8]stringþPgo.string."*[8]docker.AuthConfiguration"`Z*[8]docker.AuthConfiguration Pgo.string."*[8]docker.AuthConfiguration"þ:type.*[8]"".AuthConfiguration  B£U|6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."*[8]docker.AuthConfiguration"pLgo.weak.type.**[8]"".AuthConfiguration€"runtime.zerovalue8type.[8]"".AuthConfigurationþFgo.string."*[8]docker.dockerConfig"PP*[8]docker.dockerConfig Fgo.string."*[8]docker.dockerConfig"þ0type.*[8]"".dockerConfig  ¥)u¨6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*[8]docker.dockerConfig"pBgo.weak.type.**[8]"".dockerConfig€"runtime.zerovalue.type.[8]"".dockerConfigþ6go.string."*[8]docker.Port"@@*[8]docker.Port 6go.string."*[8]docker.Port"þ type.*[8]"".Port  D18Û6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*[8]docker.Port"p2go.weak.type.**[8]"".Port€"runtime.zerovaluetype.[8]"".Portþ8go.string."*[8]interface {}"PB*[8]interface {} 8go.string."*[8]interface {}"þ*type.*[8]interface {}  ‰aK6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*[8]interface 
{}"ptype..hash."".AuthConfigurationþ$runtime.strhash·fruntime.strhashþ@type..eq."".AuthConfiguration·f:type..eq."".AuthConfigurationþ.type..hash.[8]string·f(type..hash.[8]stringþ*type..eq.[8]string·f$type..eq.[8]stringþJtype..hash.[8]"".AuthConfiguration·fDtype..hash.[8]"".AuthConfigurationþFtype..eq.[8]"".AuthConfiguration·f@type..eq.[8]"".AuthConfigurationþ.type..hash.[3]string·f(type..hash.[3]stringþ*type..eq.[3]string·f$type..eq.[3]stringþ.type..hash.[2]string·f(type..hash.[2]stringþ*type..eq.[2]string·f$type..eq.[2]stringþ:type..hash."".dockerConfig·f4type..hash."".dockerConfigþ6type..eq."".dockerConfig·f0type..eq."".dockerConfigþ@type..hash.[8]"".dockerConfig·f:type..hash.[8]"".dockerConfigþ"".(*eventMonitoringState).LockþF"".(*eventMonitoringState).RLock·f@"".(*eventMonitoringState).RLockþJ"".(*eventMonitoringState).RLocker·fD"".(*eventMonitoringState).RLockerþJ"".(*eventMonitoringState).RUnlock·fD"".(*eventMonitoringState).RUnlockþH"".(*eventMonitoringState).Unlock·fB"".(*eventMonitoringState).UnlockþB"".(*eventMonitoringState).Add·f<"".(*eventMonitoringState).AddþD"".(*eventMonitoringState).Done·f>"".(*eventMonitoringState).DoneþD"".(*eventMonitoringState).Wait·f>"".(*eventMonitoringState).Waitþ4"".(*APIVersion).String·f."".(*APIVersion).Stringþ(runtime.panicwrap·f"runtime.panicwrapþ8"".(*APIVersion).LessThan·f2"".(*APIVersion).LessThanþJ"".(*APIVersion).LessThanOrEqualTo·fD"".(*APIVersion).LessThanOrEqualToþ>"".(*APIVersion).GreaterThan·f8"".(*APIVersion).GreaterThanþP"".(*APIVersion).GreaterThanOrEqualTo·fJ"".(*APIVersion).GreaterThanOrEqualToþ6"".(*APIVersion).compare·f0"".(*APIVersion).compareþRtype..hash."".AttachToContainerOptions·fLtype..hash."".AttachToContainerOptionsþ(runtime.interhash·f"runtime.interhashþNtype..eq."".AttachToContainerOptions·fHtype..eq."".AttachToContainerOptionsþ&runtime.memequal·f runtime.memequalþ$"".(*Port).Port·f"".(*Port).Portþ&"".(*Port).Proto·f 
"".(*Port).Protoþ0type..hash.[8]"".Port·f*type..hash.[8]"".Portþ,type..eq.[8]"".Port·f&type..eq.[8]"".PortþNtype..hash."".CommitContainerOptions·fHtype..hash."".CommitContainerOptionsþJtype..eq."".CommitContainerOptions·fDtype..eq."".CommitContainerOptionsþ.type..hash."".Change·f(type..hash."".Changeþ*type..eq."".Change·f$type..eq."".ChangeþRtype..hash."".CopyFromContainerOptions·fLtype..hash."".CopyFromContainerOptionsþNtype..eq."".CopyFromContainerOptions·fHtype..eq."".CopyFromContainerOptionsþ:type..hash."".KeyValuePair·f4type..hash."".KeyValuePairþ6type..eq."".KeyValuePair·f0type..eq."".KeyValuePairþ8type..hash."".PortBinding·f2type..hash."".PortBindingþ4type..eq."".PortBinding·f.type..eq."".PortBindingþtype..eq."".PullImageOptions·f8type..eq."".PullImageOptionsþBtype..hash."".PushImageOptions·ftype..eq."".PushImageOptions·f8type..eq."".PushImageOptionsþNtype..hash."".RemoveContainerOptions·fHtype..hash."".RemoveContainerOptionsþJtype..eq."".RemoveContainerOptions·fDtype..eq."".RemoveContainerOptionsþNtype..hash."".RenameContainerOptions·fHtype..hash."".RenameContainerOptionsþJtype..eq."".RenameContainerOptions·fDtype..eq."".RenameContainerOptionsþ>type..hash."".APIImageSearch·f8type..hash."".APIImageSearchþ:type..eq."".APIImageSearch·f4type..eq."".APIImageSearchþBtype..hash."".StartExecOptions·ftype..eq."".StartExecOptions·f8type..eq."".StartExecOptionsþ@type..hash."".BlkioStatsEntry·f:type..hash."".BlkioStatsEntryþ"".tlsClientCon.ConnectionStateþJcrypto/tls.(*Conn).ConnectionState·fDcrypto/tls.(*Conn).ConnectionStateþ>"".(*tlsClientCon).Handshake·f8"".(*tlsClientCon).Handshakeþ8"".tlsClientCon.Handshake·f2"".tlsClientCon.Handshakeþ>"".(*tlsClientCon).LocalAddr·f8"".(*tlsClientCon).LocalAddrþ8"".tlsClientCon.LocalAddr·f2"".tlsClientCon.LocalAddrþ>crypto/tls.(*Conn).LocalAddr·f8crypto/tls.(*Conn).LocalAddrþD"".(*tlsClientCon).OCSPResponse·f>"".(*tlsClientCon).OCSPResponseþ>"".tlsClientCon.OCSPResponse·f8"".tlsClientCon.OCSPResponseþDcrypto/tls.(*Conn).OCSPRespon
se·f>crypto/tls.(*Conn).OCSPResponseþ4"".(*tlsClientCon).Read·f."".(*tlsClientCon).Readþ."".tlsClientCon.Read·f("".tlsClientCon.Readþ4crypto/tls.(*Conn).Read·f.crypto/tls.(*Conn).Readþ@"".(*tlsClientCon).RemoteAddr·f:"".(*tlsClientCon).RemoteAddrþ:"".tlsClientCon.RemoteAddr·f4"".tlsClientCon.RemoteAddrþ@crypto/tls.(*Conn).RemoteAddr·f:crypto/tls.(*Conn).RemoteAddrþB"".(*tlsClientCon).SetDeadline·f<"".(*tlsClientCon).SetDeadlineþ<"".tlsClientCon.SetDeadline·f6"".tlsClientCon.SetDeadlineþBcrypto/tls.(*Conn).SetDeadline·f"".tlsClientCon.SetReadDeadlineþJcrypto/tls.(*Conn).SetReadDeadline·fDcrypto/tls.(*Conn).SetReadDeadlineþL"".(*tlsClientCon).SetWriteDeadline·fF"".(*tlsClientCon).SetWriteDeadlineþF"".tlsClientCon.SetWriteDeadline·f@"".tlsClientCon.SetWriteDeadlineþLcrypto/tls.(*Conn).SetWriteDeadline·fFcrypto/tls.(*Conn).SetWriteDeadlineþH"".(*tlsClientCon).VerifyHostname·fB"".(*tlsClientCon).VerifyHostnameþB"".tlsClientCon.VerifyHostname·f<"".tlsClientCon.VerifyHostnameþHcrypto/tls.(*Conn).VerifyHostname·fBcrypto/tls.(*Conn).VerifyHostnameþ6"".(*tlsClientCon).Write·f0"".(*tlsClientCon).Writeþ0"".tlsClientCon.Write·f*"".tlsClientCon.Writeþ6crypto/tls.(*Conn).Write·f0crypto/tls.(*Conn).Writeþ`"".(*tlsClientCon).crypto/tls.clientHandshake·fZ"".(*tlsClientCon).crypto/tls.clientHandshakeþZ"".tlsClientCon.crypto/tls.clientHandshake·fT"".tlsClientCon.crypto/tls.clientHandshakeþJcrypto/tls.(*Conn).clientHandshake·fDcrypto/tls.(*Conn).clientHandshakeþ\"".(*tlsClientCon).crypto/tls.decryptTicket·fV"".(*tlsClientCon).crypto/tls.decryptTicketþV"".tlsClientCon.crypto/tls.decryptTicket·fP"".tlsClientCon.crypto/tls.decryptTicketþFcrypto/tls.(*Conn).decryptTicket·f@crypto/tls.(*Conn).decryptTicketþ\"".(*tlsClientCon).crypto/tls.encryptTicket·fV"".(*tlsClientCon).crypto/tls.encryptTicketþV"".tlsClientCon.crypto/tls.encryptTicket·fP"".tlsClientCon.crypto/tls.encryptTicketþFcrypto/tls.(*Conn).encryptTicket·f@crypto/tls.(*Conn).encryptTicketþ\"".(*tlsClientCon).crypto/tls.readHandshake·
fV"".(*tlsClientCon).crypto/tls.readHandshakeþV"".tlsClientCon.crypto/tls.readHandshake·fP"".tlsClientCon.crypto/tls.readHandshakeþFcrypto/tls.(*Conn).readHandshake·f@crypto/tls.(*Conn).readHandshakeþV"".(*tlsClientCon).crypto/tls.readRecord·fP"".(*tlsClientCon).crypto/tls.readRecordþP"".tlsClientCon.crypto/tls.readRecord·fJ"".tlsClientCon.crypto/tls.readRecordþ@crypto/tls.(*Conn).readRecord·f:crypto/tls.(*Conn).readRecordþT"".(*tlsClientCon).crypto/tls.sendAlert·fN"".(*tlsClientCon).crypto/tls.sendAlertþN"".tlsClientCon.crypto/tls.sendAlert·fH"".tlsClientCon.crypto/tls.sendAlertþ>crypto/tls.(*Conn).sendAlert·f8crypto/tls.(*Conn).sendAlertþ`"".(*tlsClientCon).crypto/tls.sendAlertLocked·fZ"".(*tlsClientCon).crypto/tls.sendAlertLockedþZ"".tlsClientCon.crypto/tls.sendAlertLocked·fT"".tlsClientCon.crypto/tls.sendAlertLockedþJcrypto/tls.(*Conn).sendAlertLocked·fDcrypto/tls.(*Conn).sendAlertLockedþ`"".(*tlsClientCon).crypto/tls.serverHandshake·fZ"".(*tlsClientCon).crypto/tls.serverHandshakeþZ"".tlsClientCon.crypto/tls.serverHandshake·fT"".tlsClientCon.crypto/tls.serverHandshakeþJcrypto/tls.(*Conn).serverHandshake·fDcrypto/tls.(*Conn).serverHandshakeþ^"".(*tlsClientCon).crypto/tls.tryCipherSuite·fX"".(*tlsClientCon).crypto/tls.tryCipherSuiteþX"".tlsClientCon.crypto/tls.tryCipherSuite·fR"".tlsClientCon.crypto/tls.tryCipherSuiteþHcrypto/tls.(*Conn).tryCipherSuite·fBcrypto/tls.(*Conn).tryCipherSuiteþX"".(*tlsClientCon).crypto/tls.writeRecord·fR"".(*tlsClientCon).crypto/tls.writeRecordþR"".tlsClientCon.crypto/tls.writeRecord·fL"".tlsClientCon.crypto/tls.writeRecordþBcrypto/tls.(*Conn).writeRecord·f +__.PKGDEF 0 0 0 644 26062 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package logrus + import log "log" + import sync "sync" + import runtime "runtime" + import bufio "bufio" + import time "time" + import io "io" + import os "os" + import strings "strings" + import syscall "syscall" + import fmt "fmt" + import sort "sort" + import json "encoding/json" + import bytes 
"bytes" + import unsafe "unsafe" + type @"io".Writer interface { Write(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"".Level uint8 + func (@"".level·2 @"".Level) String () (? string) + type @"".Hook interface { Fire(? *@"".Entry) (? error); Levels() (? []@"".Level) } + type @"".LevelHooks map[@"".Level][]@"".Hook + func (@"".hooks·1 @"".LevelHooks "esc:0x0") Add (@"".hook·2 @"".Hook) + func (@"".hooks·2 @"".LevelHooks "esc:0x0") Fire (@"".level·3 @"".Level, @"".entry·4 *@"".Entry) (? error) + type @"".Formatter interface { Format(? *@"".Entry) (? []byte, ? error) } + type @"sync".Mutex struct { @"sync".state int32; @"sync".sema uint32 } + func (@"sync".m·1 *@"sync".Mutex) Lock () + func (@"sync".m·1 *@"sync".Mutex) Unlock () + type @"".Fields map[string]interface {} + type @"sync".Locker interface { Lock(); Unlock() } + type @"sync".syncSema struct { @"sync".lock uintptr; @"sync".head @"unsafe".Pointer; @"sync".tail @"unsafe".Pointer } + type @"sync".copyChecker uintptr + func (@"sync".c·1 *@"sync".copyChecker) @"sync".check () + type @"sync".Cond struct { L @"sync".Locker; @"sync".sema @"sync".syncSema; @"sync".waiters uint32; @"sync".checker @"sync".copyChecker } + func (@"sync".c·1 *@"sync".Cond) Broadcast () + func (@"sync".c·1 *@"sync".Cond) Signal () + func (@"sync".c·1 *@"sync".Cond) Wait () + func (@"sync".c·1 *@"sync".Cond) @"sync".signalImpl (@"sync".all·2 bool) + type @"io".pipe struct { @"io".rl @"sync".Mutex; @"io".wl @"sync".Mutex; @"io".l @"sync".Mutex; @"io".data []byte; @"io".rwait @"sync".Cond; @"io".wwait @"sync".Cond; @"io".rerr error; @"io".werr error } + func (@"io".p·1 *@"io".pipe) @"io".rclose (@"io".err·2 error) + func (@"io".p·3 *@"io".pipe) @"io".read (@"io".b·4 []byte "esc:0x0") (@"io".n·1 int, @"io".err·2 error) + func (@"io".p·1 *@"io".pipe) @"io".wclose (@"io".err·2 error) + func (@"io".p·3 *@"io".pipe) @"io".write (@"io".b·4 []byte) (@"io".n·1 int, @"io".err·2 error) + type @"io".PipeWriter struct { @"io".p *@"io".pipe } 
+ func (@"io".w·2 *@"io".PipeWriter) Close () (? error) + func (@"io".w·2 *@"io".PipeWriter) CloseWithError (@"io".err·3 error) (? error) + func (@"io".w·3 *@"io".PipeWriter) Write (@"io".data·4 []byte) (@"io".n·1 int, @"io".err·2 error) + type @"io".PipeReader struct { @"io".p *@"io".pipe } + func (@"io".r·2 *@"io".PipeReader) Close () (? error) + func (@"io".r·2 *@"io".PipeReader) CloseWithError (@"io".err·3 error) (? error) + func (@"io".r·3 *@"io".PipeReader) Read (@"io".data·4 []byte "esc:0x0") (@"io".n·1 int, @"io".err·2 error) + type @"".Logger struct { Out @"io".Writer; Hooks @"".LevelHooks; Formatter @"".Formatter; Level @"".Level; @"".mu @"sync".Mutex } + func (@"".logger·1 *@"".Logger) Debug (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Debugf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Debugln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Error (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Errorf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Errorln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Fatal (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Fatalf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Fatalln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Info (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Infof (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Infoln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Panic (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Panicf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Panicln (@"".args·2 
...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Print (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Printf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Println (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warn (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warnf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warning (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warningf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warningln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·1 *@"".Logger) Warnln (@"".args·2 ...interface {} "esc:0x0") + func (@"".logger·2 *@"".Logger) WithField (@"".key·3 string, @"".value·4 interface {}) (? *@"".Entry) + func (@"".logger·2 *@"".Logger) WithFields (@"".fields·3 @"".Fields "esc:0x0") (? *@"".Entry) + func (@"".logger·2 *@"".Logger) Writer () (? *@"io".PipeWriter) + func (@"".logger·1 *@"".Logger) @"".writerScanner (@"".reader·2 *@"io".PipeReader) + type @"time".zone struct { @"time".name string; @"time".offset int; @"time".isDST bool } + type @"time".zoneTrans struct { @"time".when int64; @"time".index uint8; @"time".isstd bool; @"time".isutc bool } + type @"time".Location struct { @"time".name string; @"time".zone []@"time".zone; @"time".tx []@"time".zoneTrans; @"time".cacheStart int64; @"time".cacheEnd int64; @"time".cacheZone *@"time".zone } + func (@"time".l·2 *@"time".Location "esc:0x0") String () (? string) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".firstZoneUsed () (? bool) + func (@"time".l·2 *@"time".Location "esc:0x2") @"time".get () (? 
*@"time".Location) + func (@"time".l·6 *@"time".Location "esc:0x1") @"time".lookup (@"time".sec·7 int64) (@"time".name·1 string, @"time".offset·2 int, @"time".isDST·3 bool, @"time".start·4 int64, @"time".end·5 int64) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".lookupFirstZone () (? int) + func (@"time".l·4 *@"time".Location "esc:0x0") @"time".lookupName (@"time".name·5 string "esc:0x0", @"time".unix·6 int64) (@"time".offset·1 int, @"time".isDST·2 bool, @"time".ok·3 bool) + type @"time".Duration int64 + func (@"time".d·2 @"time".Duration) Hours () (? float64) { var @"time".hour·3 @"time".Duration; ; @"time".hour·3 = @"time".d·2 / @"time".Duration(0x34630B8A000); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x34630B8A000); return float64(@"time".hour·3) + float64(@"time".nsec·4) * 0x9C5FFF26ED75Fp-93 } + func (@"time".d·2 @"time".Duration) Minutes () (? float64) { var @"time".min·3 @"time".Duration; ; @"time".min·3 = @"time".d·2 / @"time".Duration(0xDF8475800); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0xDF8475800); return float64(@"time".min·3) + float64(@"time".nsec·4) * 0x9299FF347E9E9p-87 } + func (@"time".d·2 @"time".Duration) Nanoseconds () (? int64) { return int64(@"time".d·2) } + func (@"time".d·2 @"time".Duration) Seconds () (? float64) { var @"time".sec·3 @"time".Duration; ; @"time".sec·3 = @"time".d·2 / @"time".Duration(0x3B9ACA00); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x3B9ACA00); return float64(@"time".sec·3) + float64(@"time".nsec·4) * 0x112E0BE826D695p-82 } + func (@"time".d·2 @"time".Duration) String () (? string) + type @"time".Month int + func (@"time".m·2 @"time".Month) String () (? string) { return @"time".months[@"time".m·2 - @"time".Month(0x1)] } + type @"time".Weekday int + func (@"time".d·2 @"time".Weekday) String () (? 
string) { return @"time".days[@"time".d·2] } + type @"time".Time struct { @"time".sec int64; @"time".nsec int32; @"time".loc *@"time".Location } + func (@"time".t·2 @"time".Time "esc:0x2") Add (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") AddDate (@"time".years·3 int, @"time".months·4 int, @"time".days·5 int) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") After (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec > @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec > @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Before (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec < @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec < @"time".u·3.@"time".nsec } + func (@"time".t·4 @"time".Time "esc:0x0") Clock () (@"time".hour·1 int, @"time".min·2 int, @"time".sec·3 int) + func (@"time".t·4 @"time".Time "esc:0x0") Date () (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int) + func (@"time".t·2 @"time".Time "esc:0x0") Day () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Equal (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec == @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Format (@"time".layout·3 string "esc:0x0") (? string) + func (@"time".t·2 *@"time".Time "esc:0x0") GobDecode (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·3 @"time".Time "esc:0x0") GobEncode () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Hour () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") ISOWeek () (@"time".year·1 int, @"time".week·2 int) + func (@"time".t·2 @"time".Time "esc:0x2") In (@"time".loc·3 *@"time".Location "esc:0x2") (? 
@"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") IsZero () (? bool) { return @"time".t·2.@"time".sec == 0x0 && @"time".t·2.@"time".nsec == 0x0 } + func (@"time".t·2 @"time".Time "esc:0x2") Local () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".Local; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x2") Location () (? *@"time".Location) { var @"time".l·3 *@"time".Location; ; @"time".l·3 = @"time".t·2.@"time".loc; if @"time".l·3 == nil { @"time".l·3 = @"time".UTC }; return @"time".l·3 } + func (@"time".t·3 @"time".Time "esc:0x0") MarshalBinary () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalText () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Minute () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Month () (? @"time".Month) + func (@"time".t·2 @"time".Time "esc:0x0") Nanosecond () (? int) { return int(@"time".t·2.@"time".nsec) } + func (@"time".t·2 @"time".Time "esc:0x2") Round (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") Second () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") String () (? string) + func (@"time".t·2 @"time".Time "esc:0x0") Sub (@"time".u·3 @"time".Time "esc:0x0") (? @"time".Duration) + func (@"time".t·2 @"time".Time "esc:0x2") Truncate (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") UTC () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".UTC; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x0") Unix () (? int64) { return @"time".t·2.@"time".sec + -0xE7791F700 } + func (@"time".t·2 @"time".Time "esc:0x0") UnixNano () (? int64) { return (@"time".t·2.@"time".sec + -0xE7791F700) * 0x3B9ACA00 + int64(@"time".t·2.@"time".nsec) } + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalBinary (@"time".data·3 []byte "esc:0x0") (? 
error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalJSON (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalText (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 @"time".Time "esc:0x0") Weekday () (? @"time".Weekday) + func (@"time".t·2 @"time".Time "esc:0x0") Year () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") YearDay () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") Zone () (@"time".name·1 string, @"time".offset·2 int) + func (@"time".t·2 @"time".Time "esc:0x0") @"time".abs () (? uint64) + func (@"time".t·5 @"time".Time "esc:0x0") @"time".date (@"time".full·6 bool) (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int, @"time".yday·4 int) + func (@"time".t·4 @"time".Time "esc:0x1") @"time".locabs () (@"time".name·1 string, @"time".offset·2 int, @"time".abs·3 uint64) + type @"bytes".readOp int + type @"io".Reader interface { Read(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"bytes".Buffer struct { @"bytes".buf []byte; @"bytes".off int; @"bytes".runeBytes [4]byte; @"bytes".bootstrap [64]byte; @"bytes".lastRead @"bytes".readOp } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Bytes () (? []byte) { return @"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:] } + func (@"bytes".b·1 *@"bytes".Buffer) Grow (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") Len () (? int) { return len(@"bytes".b·2.@"bytes".buf) - @"bytes".b·2.@"bytes".off } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Next (@"bytes".n·3 int) (? 
[]byte) + func (@"bytes".b·3 *@"bytes".Buffer) Read (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadByte () (@"bytes".c·1 byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadBytes (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadFrom (@"bytes".r·4 @"io".Reader) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·4 *@"bytes".Buffer) ReadRune () (@"bytes".r·1 rune, @"bytes".size·2 int, @"bytes".err·3 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadString (@"bytes".delim·4 byte) (@"bytes".line·1 string, @"bytes".err·2 error) + func (@"bytes".b·1 *@"bytes".Buffer) Reset () + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") String () (? string) { if @"bytes".b·2 == nil { return "" }; return string(@"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:]) } + func (@"bytes".b·1 *@"bytes".Buffer) Truncate (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadByte () (? error) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadRune () (? error) + func (@"bytes".b·3 *@"bytes".Buffer) Write (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) WriteByte (@"bytes".c·3 byte) (? error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteRune (@"bytes".r·4 rune) (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteString (@"bytes".s·4 string "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteTo (@"bytes".w·4 @"io".Writer) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) @"bytes".grow (@"bytes".n·3 int) (? 
int) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x1") @"bytes".readSlice (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + type @"".Entry struct { Logger *@"".Logger; Data @"".Fields; Time @"time".Time; Level @"".Level; Message string } + func (@"".entry·1 *@"".Entry) Debug (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Debugf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Debugln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Error (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Errorf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Errorln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Fatal (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Fatalf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Fatalln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Info (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Infof (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Infoln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Panic (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Panicf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Panicln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Print (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Printf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Println (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·3 *@"".Entry) Reader () (? *@"bytes".Buffer, ? error) + func (@"".entry·3 *@"".Entry) String () (? string, ? 
error) + func (@"".entry·1 *@"".Entry) Warn (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Warnf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Warning (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Warningf (@"".format·2 string "esc:0x0", @"".args·3 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Warningln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·1 *@"".Entry) Warnln (@"".args·2 ...interface {} "esc:0x0") + func (@"".entry·2 *@"".Entry) WithField (@"".key·3 string, @"".value·4 interface {}) (? *@"".Entry) + func (@"".entry·2 *@"".Entry) WithFields (@"".fields·3 @"".Fields "esc:0x0") (? *@"".Entry) + func (@"".entry·1 *@"".Entry) @"".log (@"".level·2 @"".Level, @"".msg·3 string) + func (@"".entry·2 *@"".Entry "esc:0x0") @"".sprintlnn (@"".args·3 ...interface {} "esc:0x0") (? string) + func @"".NewEntry (@"".logger·2 *@"".Logger) (? *@"".Entry) { return (&@"".Entry{ Logger:@"".logger·2, Data:make(@"".Fields, 0x5) }) } + func @"".StandardLogger () (? *@"".Logger) { return @"".std } + func @"".SetOutput (@"".out·1 @"io".Writer) + func @"".SetFormatter (@"".formatter·1 @"".Formatter) + func @"".SetLevel (@"".level·1 @"".Level) + func @"".GetLevel () (? @"".Level) + func @"".AddHook (@"".hook·1 @"".Hook) + func @"".WithField (@"".key·2 string, @"".value·3 interface {}) (? *@"".Entry) + func @"".WithFields (@"".fields·2 @"".Fields "esc:0x0") (? 
*@"".Entry) + func @"".Debug (@"".args·1 ...interface {} "esc:0x0") + func @"".Print (@"".args·1 ...interface {} "esc:0x0") + func @"".Info (@"".args·1 ...interface {} "esc:0x0") + func @"".Warn (@"".args·1 ...interface {} "esc:0x0") + func @"".Warning (@"".args·1 ...interface {} "esc:0x0") + func @"".Error (@"".args·1 ...interface {} "esc:0x0") + func @"".Panic (@"".args·1 ...interface {} "esc:0x0") + func @"".Fatal (@"".args·1 ...interface {} "esc:0x0") + func @"".Debugf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Printf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Infof (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Warnf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Warningf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Errorf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Panicf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Fatalf (@"".format·1 string "esc:0x0", @"".args·2 ...interface {} "esc:0x0") + func @"".Debugln (@"".args·1 ...interface {} "esc:0x0") + func @"".Println (@"".args·1 ...interface {} "esc:0x0") + func @"".Infoln (@"".args·1 ...interface {} "esc:0x0") + func @"".Warnln (@"".args·1 ...interface {} "esc:0x0") + func @"".Warningln (@"".args·1 ...interface {} "esc:0x0") + func @"".Errorln (@"".args·1 ...interface {} "esc:0x0") + func @"".Panicln (@"".args·1 ...interface {} "esc:0x0") + func @"".Fatalln (@"".args·1 ...interface {} "esc:0x0") + const @"".DefaultTimestampFormat = "2006-01-02T15:04:05Z07:00" + type @"".JSONFormatter struct { TimestampFormat string } + func (@"".f·3 *@"".JSONFormatter "esc:0x0") Format (@"".entry·4 *@"".Entry) (? []byte, ? error) + func @"".New () (? 
*@"".Logger) { return (&@"".Logger{ Out:@"os".Stderr, Formatter:new(@"".TextFormatter), Hooks:make(@"".LevelHooks, 0x0), Level:@"".Level(0x4) }) } + func @"".ParseLevel (@"".lvl·3 string) (? @"".Level, ? error) + const @"".PanicLevel @"".Level = 0x0 + const @"".FatalLevel @"".Level = 0x1 + const @"".ErrorLevel @"".Level = 0x2 + const @"".WarnLevel @"".Level = 0x3 + const @"".InfoLevel @"".Level = 0x4 + const @"".DebugLevel @"".Level = 0x5 + type @"".StdLogger interface { Fatal(? ...interface {}); Fatalf(? string, ? ...interface {}); Fatalln(? ...interface {}); Panic(? ...interface {}); Panicf(? string, ? ...interface {}); Panicln(? ...interface {}); Print(? ...interface {}); Printf(? string, ? ...interface {}); Println(? ...interface {}) } + type @"".Termios struct { Iflag uint64; Oflag uint64; Cflag uint64; Lflag uint64; Cc [20]uint8; Pad_cgo_0 [4]byte; Ispeed uint64; Ospeed uint64 } + func @"".IsTerminal () (? bool) + type @"".TextFormatter struct { ForceColors bool; DisableColors bool; DisableTimestamp bool; FullTimestamp bool; TimestampFormat string; DisableSorting bool } + func (@"".f·3 *@"".TextFormatter "esc:0x0") Format (@"".entry·4 *@"".Entry) (? []byte, ? error) + func (@"".f·1 *@"".TextFormatter "esc:0x0") @"".appendKeyValue (@"".b·2 *@"bytes".Buffer, @"".key·3 string "esc:0x0", @"".value·4 interface {}) + func (@"".f·1 *@"".TextFormatter "esc:0x0") @"".printColored (@"".b·2 *@"bytes".Buffer, @"".entry·3 *@"".Entry, @"".keys·4 []string "esc:0x0") + func @"".init () + var @"time".months [12]string + var @"time".days [7]string + var @"time".Local *@"time".Location + var @"time".UTC *@"time".Location + var @"".std *@"".Logger + type @"os".dirInfo struct { @"os".buf []byte; @"os".nbuf int; @"os".bufp int } + type @"os".file struct { @"os".fd int; @"os".name string; @"os".dirinfo *@"os".dirInfo; @"os".nepipe int32 } + func (@"os".file·2 *@"os".file) @"os".close () (? error) + type @"os".FileMode uint32 + func (@"os".m·2 @"os".FileMode) IsDir () (? 
bool) { return @"os".m·2 & @"os".FileMode(0x80000000) != @"os".FileMode(0x0) } + func (@"os".m·2 @"os".FileMode) IsRegular () (? bool) { return @"os".m·2 & @"os".FileMode(0x8F000000) == @"os".FileMode(0x0) } + func (@"os".m·2 @"os".FileMode) Perm () (? @"os".FileMode) { return @"os".m·2 & @"os".FileMode(0x1FF) } + func (@"os".m·2 @"os".FileMode) String () (? string) + type @"os".FileInfo interface { IsDir() (? bool); ModTime() (? @"time".Time); Mode() (? @"os".FileMode); Name() (? string); Size() (? int64); Sys() (? interface {}) } + type @"os".File struct { @"os".? *@"os".file } + func (@"os".f·2 *@"os".File) Chdir () (? error) + func (@"os".f·2 *@"os".File) Chmod (@"os".mode·3 @"os".FileMode) (? error) + func (@"os".f·2 *@"os".File) Chown (@"os".uid·3 int, @"os".gid·4 int) (? error) + func (@"os".f·2 *@"os".File) Close () (? error) + func (@"os".f·2 *@"os".File "esc:0x0") Fd () (? uintptr) { if @"os".f·2 == nil { return 0xFFFFFFFFFFFFFFFF }; return uintptr(@"os".f·2.@"os".file.@"os".fd) } + func (@"os".f·2 *@"os".File "esc:0x1") Name () (? string) { return @"os".f·2.@"os".file.@"os".name } + func (@"os".f·3 *@"os".File) Read (@"os".b·4 []byte "esc:0x0") (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File) ReadAt (@"os".b·4 []byte "esc:0x0", @"os".off·5 int64) (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") Readdir (@"os".n·4 int) (@"os".fi·1 []@"os".FileInfo, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") Readdirnames (@"os".n·4 int) (@"os".names·1 []string, @"os".err·2 error) + func (@"os".f·3 *@"os".File) Seek (@"os".offset·4 int64, @"os".whence·5 int) (@"os".ret·1 int64, @"os".err·2 error) + func (@"os".f·3 *@"os".File) Stat () (@"os".fi·1 @"os".FileInfo, @"os".err·2 error) + func (@"os".f·2 *@"os".File "esc:0x0") Sync () (@"os".err·1 error) + func (@"os".f·2 *@"os".File) Truncate (@"os".size·3 int64) (? 
error) + func (@"os".f·3 *@"os".File) Write (@"os".b·4 []byte "esc:0x0") (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File) WriteAt (@"os".b·4 []byte "esc:0x0", @"os".off·5 int64) (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File) WriteString (@"os".s·4 string "esc:0x0") (@"os".ret·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".pread (@"os".b·4 []byte "esc:0x0", @"os".off·5 int64) (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".pwrite (@"os".b·4 []byte "esc:0x0", @"os".off·5 int64) (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".read (@"os".b·4 []byte "esc:0x0") (@"os".n·1 int, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".readdir (@"os".n·4 int) (@"os".fi·1 []@"os".FileInfo, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".readdirnames (@"os".n·4 int) (@"os".names·1 []string, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".seek (@"os".offset·4 int64, @"os".whence·5 int) (@"os".ret·1 int64, @"os".err·2 error) + func (@"os".f·3 *@"os".File "esc:0x0") @"os".write (@"os".b·4 []byte "esc:0x0") (@"os".n·1 int, @"os".err·2 error) + var @"os".Stderr *@"os".File + +$$ +_go_.6 0 0 0 644 260091 ` +go object darwin amd64 go1.4.2 X:precisestack + +! 
+go13ldbytes.a +fmt.aio.aos.a time.aencoding/json.a sync.a +log.asyscall.aruntime.a sort.astrings.abufio.aþ"".NewEntry ”eH‹ %H;awèëêHƒì(HH‰$HÇD$èH‹\$H‰\$ HH‰$èH‹L$H‰ÏHƒùtk1ÀèH‰L$H‰ $Hƒ<$tKH‹\$0H‰\$èH‹\$H‰$Hƒ<$t#Hƒ$H‹\$ H‰\$èH‹\$H‰\$8HƒÄ(É%ëÔ‰%묉ë‘ + 0runtime.morestack_noctxt:type."".Fields^runtime.makemap€type."".Entry’"runtime.newobject¼à runtime.duffzeroú.runtime.writebarrierptrÂ.runtime.writebarrierptr P"".autotmp_0001type.*"".Entry"".autotmp_0000type."".Fields "".~r1type.*"".Entry"".loggertype.*"".LoggerP™OPÐ@‹.4$0Tgclocals·a7c27d2bfcc924fa8a92b6b29b7218b1Tgclocals·e475e3c2360b557d64285d9b9a4e5064²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).ReaderÀ²eH‹ %HD$ðH;AwèëåHìH‹”$˜HÇ„$¨HÇ„$°H‹*Hƒý„H‹MH‹E H‰T$H‰D$XH‰$H‰L$PH‹Y ÿÓH‹T$H‹L$H‹D$ H‹\$(H‰\$@H‹\$0H‰\$HH‰T$`H‰T$xH‰L$hH‰Œ$€H‰D$pH‰„$ˆHH‰$èH‹L$H‰ÏHƒùtx1ÀèH‰L$8H‰ $Hƒ<$tXH‹\$xH‰\$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹\$8H‰œ$ H‹\$@H‰œ$¨H‹\$HH‰œ$°HÄÉ%량넉Eéöþÿÿ +*0runtime.morestack_noctxtÜ +ô"type.bytes.Buffer†"runtime.newobject°È runtime.duffzero¢2runtime.writebarrierslice@ "".autotmp_0003¯$type.*bytes.Buffer"".autotmp_0002$type.*bytes.Bufferbytes.buf·2/type.[]uint8 "".errŸtype.error"".serialized_type.[]uint8 "".~r1 type.error "".~r0$type.*bytes.Buffer"".entrytype.*"".Entry" ¡Ÿ àRBQ¾nTNPTgclocals·363b18caf0020ca418fd378dbb75c855Tgclocals·0719ac7e4405ec7094b2d696ead0af25²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).String žeH‹ %H;awèëêHƒì`HÇD$pHÇD$xHÇ„$€HÇ„$ˆH‹\$hH‰$èH‹D$H‹T$H‹t$H‰t$@HƒúH‰T$8t'HÇD$pHÇD$xH‰”$€H‰´$ˆHƒÄ`ÃHÇD$(HÇD$01íH9èu-HH‹ H‹CH‰L$pH‰D$xH‰”$€H‰´$ˆHƒÄ`ÃH‹xH‹PH‹HH9ÊrYH‹H‰ÖH)ÎH‰úH)ÊHƒút H‰ËHÃH‰ØH‰D$HH‰$H‰t$PH‰t$H‰T$XH‰T$èH‹t$@H‹T$8H‹L$H‹D$ éwÿÿÿè + 0runtime.morestack_noctxtœ$"".(*Entry).Readerè"go.string.""Ö2runtime.slicebytetostring’$runtime.panicslicePÀ + "".~r0otype.string 
"".errOtype.error "".~r10type.error "".~r0type.string"".entrytype.*"".Entry$À}¿ÀE¿ÀrÐ`D" '¸MÝTgclocals·896a3e2c9de7030cc72aa334f690557dTgclocals·44e348188e22fef6300f71ab26e45197²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ*"".(*Entry).WithField€êeH‹ %H;awèëêHƒìHHH‰$HÇD$èH‹D$H‹\$XH‰\$8H‹\$`H‰\$@H‹\$hH‰\$(H‹\$pH‰\$0HH‰$H‰D$ H‰D$H\$8H‰\$H\$(H‰\$èH‹\$PH‰$H‹\$ H‰\$èH‹\$H‰\$xHƒÄHà + 0runtime.morestack_noctxt:type."".Fields^runtime.makemapÆtype."".Fields”$runtime.mapassign1Ä,"".(*Entry).WithFields`"".autotmp_0016?"type.interface {}"".autotmp_0015type.string"".autotmp_0014Otype."".Fields "".~r2Ptype.*"".Entry"".value0"type.interface {} "".keytype.string"".entrytype.*"".Entryš À t¦.[Tgclocals·0723c8881b4d19cb48cb8887cfa073beTgclocals·396579fca70851935df9d21183ca82fd²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ,"".(*Entry).WithFieldsà Ì eH‹ %HD$H;AwèëåHìðHH‰$HÇD$èH‹\$H‰\$ H‹œ$øH‹kH¼$ 1ÀèHH‰$H‰l$Hœ$ H‰\$èH‹œ$ 1íH9ë„ÈH‹œ$¨Hƒû„xH‹ H‹CH‹œ$ Hƒû„XH‹3H‹kH‰L$pH‰D$xH‰t$PH‰´$H‰l$XH‰¬$˜H‰L$0H‰Œ$€H‰D$8H‰„$ˆHH‰$H‹\$ H‰\$Hœ$H‰\$Hœ$€H‰\$èHœ$ H‰$èH‹œ$ 1íH9ë…8ÿÿÿH‹Œ$H¼$ 1ÀèHH‰$H‰L$Hœ$ H‰\$èH‹œ$ 1íH9ë„ÈH‹œ$¨Hƒû„VH‹ H‹CH‹œ$ Hƒû„6H‹3H‹kH‰L$pH‰D$xH‰t$`H‰´$H‰l$hH‰¬$˜H‰L$@H‰Œ$€H‰D$HH‰„$ˆHH‰$H‹\$ H‰\$Hœ$H‰\$Hœ$€H‰\$èHœ$ H‰$èH‹œ$ 1íH9ë…8ÿÿÿHH‰$èH‹L$H‰ÏHƒùtw1ÀèH‰L$(H‰ $Hƒ<$tWH‹œ$øH‹+H‰l$èH‹\$(H‰$Hƒ<$t)Hƒ$H‹\$ H‰\$èH‹\$(H‰œ$HÄðÉ%ëΉ%렉녉éÃþÿÿ‰é£þÿÿ‰é¡ýÿÿ‰éýÿÿ* +*0runtime.morestack_noctxtJtype."".Fieldsnruntime.makemap¸Ø runtime.duffzeroÆtype."".Fieldsü&runtime.mapiterinittype."".Fieldsê$runtime.mapassign1Œ&runtime.mapiternextàØ runtime.duffzeroîtype."".Fields¤&runtime.mapiterinit¸type."".Fields’ $runtime.mapassign1´ &runtime.mapiternextè type."".Entryú "runtime.newobject¤ +à runtime.duffzeroî +.runtime.writebarrierptr¶ 
.runtime.writebarrierptr0à$"".autotmp_0027type.*"".Entry"".autotmp_0026"type.interface {}"".autotmp_0025ÿ"type.interface {}"".autotmp_0024"type.interface {}"".autotmp_0023type.string"".autotmp_0022Btype.map.iter[string]interface {}"".autotmp_0021type."".Fields"".autotmp_0020ß"type.interface {}"".autotmp_0019¿type.string"".autotmp_0018ŸBtype.map.iter[string]interface {}"".vß"type.interface {}"".kŸtype.string"".vÿ"type.interface {}"".k¿type.string"".dataŸtype."".Fields "".~r1 type.*"".Entry"".fieldstype."".Fields"".entrytype.*"".Entry"àÑßà<°4~"#Œh$ˆh$™.6G·ƒ#:$VTgclocals·fdf817463ca91d173b8e929c420286bdTgclocals·cbbe1bd73f3c341fc477038dafd9ade4²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ"".(*Entry).logàÔeH‹ %HD$ˆH;AwèëåHìøèH‹,$‹T$H‹L$H‹œ$H‰$Hƒ<$„SHƒ$HÇD$H‰¬$àH‰l$‰”$è‰T$H‰Œ$ðH‰L$ èH‹Œ$¶¬$@ˆi(H‰ $Hƒ<$„ìHƒ$0H‹œ$H‰\$H‹œ$H‰\$èH‹Œ$H‹H‹kH‰,$¶œ$ˆ\$H‰L$èH‹T$H‹L$ H‰L$pHƒúH‰T$h„bH‹œ$H‹+H‰,$Hƒ<$„[Hƒ$,èH‹ H‰L$X1íH9é„Hœ$¸HÇHÇCHœ$¸Hƒû„ÓHÇÅHÇÂH‰œ$ÈH‰¬$ÐH‰”$ØH‹\$hH‰$H‹\$pH‰\$èH‹T$H‹L$H‹œ$ÈH‰$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹H‹L$XH‰Œ$˜H‰ $H‰”$ H‰T$HHl$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$ H‹œ$ÐH‰\$(H‹œ$ØH‰\$0èH‹œ$H‹+H‰,$Hƒ<$„ÓHƒ$,èH‹œ$H‰$èH‹\$H‰\$PH‹L$H‹\$H‰œ$€HƒùH‰L$x„eH‹œ$H‹+H‰,$Hƒ<$„gHƒ$,èH‹ H‰L$X1íH9é„Hœ$¸HÇHÇCHœ$¸Hƒû„ßHÇÂHÇÅH‰œ$ÈH‰”$ÐH‰¬$ØH‹\$xH‰$H‹œ$€H‰\$èH‹T$H‹L$H‹œ$ÈH‰$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹H‹L$XH‰Œ$˜H‰ $H‰”$ H‰T$HHl$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$ H‹œ$ÐH‰\$(H‹œ$ØH‰\$0èH‹œ$H‹+H‰,$Hƒ<$„ÜHƒ$,èH‹œ$H‹+H‰,$Hƒ<$„¬Hƒ$,èH‹œ$H‹+H‰,$Hƒ<$„|Hƒ$,H QjèYYH…À…OH‹\$PH‰\$`H‹1íH9è„H‹´$H‹>Hƒÿ„åH7H<$H¥H¥H‹L$`H‰„$ˆH‰D$H‰Œ$H‰L$èH‹t$(H‹\$0H‰œ$€HƒþH‰t$x„H‹H‰D$X1íH9è„BHœ$¸HÇHÇCHœ$¸Hƒû„HÇÂHÇÁH‰œ$ÈH‰”$ÐH‰Œ$ØH‰4$H‹œ$€H‰\$èH‹L$H‹D$H‹œ$ÈH‰$H‰Œ$¨H‰L$H‰„$°H‰D$èH‹ H‹D$XH‰„$˜H‰$H‰Œ$ H‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$ H‹œ$ÐH‰\$(H‹œ$ØH‰\$0趜$€ûwH‹„$HH‰$H‰D$è èHÄøÉéçþÿÿHH‰$HH‰\$HH‰\$èH‹t$xH‹\$H‰\$Xé‚þÿÿ‰éþÿÿHH‰$HH‰\$HH‰\$èH‹D$éÍýÿÿèHÄøÉ%éxýÿÿ‰%éHýÿÿ‰%éýÿÿ‰éüÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$Xéºûÿÿ‰%éûÿÿ‰%é!ûÿÿ‰é&úÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$XéÆùÿÿ‰%é™ùÿÿ‰%éùÿÿ‰%é¡øÿÿl 
+*0runtime.morestack_noctxtFtime.Nowþ0runtime.writebarrierfat3Œ4runtime.writebarrierstringÞ$"".LevelHooks.Fireâ$sync.(*Mutex).Lockð4go.itab.*os.File.io.WriterÞruntime.convI2EÈ2runtime.writebarrierifaceÖos.Stderr Jgo.string."Failed to fire hook: %v\n"– fmt.FprintfÞ (sync.(*Mutex).Unlock€ +$"".(*Entry).Readerž $sync.(*Mutex).Lock¬ 4go.itab.*os.File.io.Writer  runtime.convI2EŠ2runtime.writebarrieriface˜os.StderrâRgo.string."Failed to obtain reader, %v\n"Øfmt.Fprintf (sync.(*Mutex).Unlockè$sync.(*Mutex).Lock´.sync.(*Mutex).Unlock·fÄ"runtime.deferprocü>go.itab.*bytes.Buffer.io.Readeršio.Copyê4go.itab.*os.File.io.WriterÔruntime.convI2E¾2runtime.writebarrierifaceÌos.Stderr–Pgo.string."Failed to write to log, %v\n"Œfmt.FprintfÄtype.*"".Entryàruntime.gopanicð&runtime.deferreturnœtype.*os.File²type.io.WriterÊ4go.itab.*os.File.io.WriterÞ runtime.typ2Itab¢$type.*bytes.Buffer¸type.io.ReaderÐ>go.itab.*bytes.Buffer.io.Readerä runtime.typ2Itab„&runtime.deferreturnøtype.*os.FileŽtype.io.Writer¦4go.itab.*os.File.io.Writerº runtime.typ2Itab¤type.*os.Fileºtype.io.WriterÒ4go.itab.*os.File.io.Writeræ runtime.typ2Itab@ð."".autotmp_0047"type.interface {}"".autotmp_0046*type.*[1]interface {}"".autotmp_0045&type.[]interface {}"".autotmp_0044type.*uint8"".autotmp_0043type.*uint8"".autotmp_0042"type.interface {}"".autotmp_0041*type.*[1]interface {}"".autotmp_0040&type.[]interface {}"".autotmp_0039type.*uint8"".autotmp_0038Ÿ"type.interface {}"".autotmp_0036_&type.[]interface {}"".autotmp_0035¿type.*uint8"".autotmp_0033(type.[1]interface {}"".autotmp_0032¯$type.*bytes.Buffer"".autotmp_0031(type.[1]interface {}"".autotmp_0030(type.[1]interface {}"".autotmp_0028/type.time.Time "".errÿtype.error"".readerÏ$type.*bytes.Buffer "".errŸtype.error "".msg type.string"".leveltype."".Level"".entrytype.*"".Entry<"ð½Ûïð‰ïðâ°Ž”"i ;?$š$-$$$9r˜ C9  > >  \"Ž~œ$`œ$½ 
œ*?C\V>Tgclocals·65a30d49934626502b3d799f3cf8d99eTgclocals·22d60cc41efa02a0ac67663f051098e8²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).Debug ˆeH‹ %H;awèëêHƒì8H‹l$@H‹m¶](€ûrSH‹\$HH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹L$H‹D$ H‹\$@H‰$ÆD$H‰L$(H‰L$H‰D$0H‰D$èHƒÄ8à + 0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).log@p"".autotmp_0067type.string"".args&type.[]interface {}"".entrytype.*"".Entrypio ØS +IGTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).Print –eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‰\$H‹\$8H‰\$H‹\$@H‰\$èHƒÄ à + 0runtime.morestack_noctxt„ "".(*Entry).Info@@"".args&type.[]interface {}"".entrytype.*"".Entry@0?Pä, + +ATgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ "".(*Entry).Info ˆeH‹ %H;awèëêHƒì8H‹l$@H‹m¶](€ûrSH‹\$HH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹L$H‹D$ H‹\$@H‰$ÆD$H‰L$(H‰L$H‰D$0H‰D$èHƒÄ8à + 0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).log@p"".autotmp_0068type.string"".args&type.[]interface {}"".entrytype.*"".Entrypio ìS +IGTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ "".(*Entry).Warn ˆeH‹ %H;awèëêHƒì8H‹l$@H‹m¶](€ûrSH‹\$HH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹L$H‹D$ H‹\$@H‰$ÆD$H‰L$(H‰L$H‰D$0H‰D$èHƒÄ8à + 0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).log@p"".autotmp_0069type.string"".args&type.[]interface {}"".entrytype.*"".Entrypio øS 
+IGTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Warning –eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‰\$H‹\$8H‰\$H‹\$@H‰\$èHƒÄ à + 0runtime.morestack_noctxt„ "".(*Entry).Warn@@"".args&type.[]interface {}"".entrytype.*"".Entry@0?P„, + +ATgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).Error ˆeH‹ %H;awèëêHƒì8H‹l$@H‹m¶](€ûrSH‹\$HH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹L$H‹D$ H‹\$@H‰$ÆD$H‰L$(H‰L$H‰D$0H‰D$èHƒÄ8à + 0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).log@p"".autotmp_0070type.string"".args&type.[]interface {}"".entrytype.*"".Entrypio ŒS +IGTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).FatalÀ¢eH‹ %H;awèëêHƒì8H‹l$@H‹m¶](€ûrSH‹\$HH‰$H‹\$PH‰\$H‹\$XH‰\$èH‹L$H‹D$ H‹\$@H‰$ÆD$H‰L$(H‰L$H‰D$0H‰D$èHÇ$èHƒÄ8à + + 0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).logos.Exit@p"".autotmp_0071type.string"".args&type.[]interface {}"".entrytype.*"".Entrypvo ˜S  +IWTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).PanicàÒeH‹ %H;awèëêHƒìHH‹l$PH‹m¶](€ûrSH‹\$XH‰$H‹\$`H‰\$H‹\$hH‰\$èH‹L$H‹D$ H‹\$PH‰$ÆD$H‰L$8H‰L$H‰D$@H‰D$èH‹\$XH‰$H‹\$`H‰\$H‹\$hH‰\$èH‹\$H‰\$(H‹\$ H‰\$0HH‰$H\$(H‰\$èH\$H,$H‰ïH‰ÞH¥H¥è  + 
0runtime.morestack_noctxt”fmt.Sprintö"".(*Entry).logºfmt.Sprintðtype.string–runtime.convT2EÆruntime.gopanic@"".autotmp_0073?type.string"".autotmp_0072type.string"".args&type.[]interface {}"".entrytype.*"".EntryÖð¦SqI&Tgclocals·9ff42bf311af152488d11f0f78c8d5ceTgclocals·29f0050a5ee7c2b9348a75428171d7de²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).DebugfÀ¦eH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„šHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHĀÉé_ÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€""".(*Entry).Debug`€"".autotmp_0078"type.interface {}"".autotmp_0076/&type.[]interface {}"".autotmp_0075otype.string"".autotmp_0074O(type.[1]interface {}"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry€®ÿ€à¸Žvn[!Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).InfofÀ¦eH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„šHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHĀÉé_ÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€ "".(*Entry).Info`€"".autotmp_0086"type.interface {}"".autotmp_0084/&type.[]interface {}"".autotmp_0083otype.string"".autotmp_0082O(type.[1]interface {}"".args0&type.[]interface 
{}"".formattype.string"".entrytype.*"".Entry€®ÿ€àÄŽvn[!Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).PrintfÀ¾eH‹ %H;awèëêHƒì0H‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$ H‹\$`H‰\$(èHƒÄ0à + 0runtime.morestack_noctxt¬""".(*Entry).Infof``"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry`D_`Ð@ +U Tgclocals·0a3395567ab7eee3bb936aced49af517Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".(*Entry).WarnfÀ¦eH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„šHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHĀÉé_ÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€ "".(*Entry).Warn`€"".autotmp_0094"type.interface {}"".autotmp_0092/&type.[]interface {}"".autotmp_0091otype.string"".autotmp_0090O(type.[1]interface {}"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry€®ÿ€àØŽvn[!Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ("".(*Entry).WarningfÀ¾eH‹ %H;awèëêHƒì0H‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$ H‹\$`H‰\$(èHƒÄ0à + 0runtime.morestack_noctxt¬""".(*Entry).Warnf``"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry`D_`ä@ +U 
Tgclocals·0a3395567ab7eee3bb936aced49af517Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).ErrorfÀ¦eH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„šHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHĀÉé_ÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€""".(*Entry).Error`€"".autotmp_0102"type.interface {}"".autotmp_0100/&type.[]interface {}"".autotmp_0099otype.string"".autotmp_0098O(type.[1]interface {}"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry€®ÿ€àìŽvn[!Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).FatalfÀÀeH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„§HÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHÇ$èHĀÉéRÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€""".(*Entry).Fatalšos.Exit`€"".autotmp_0110"type.interface {}"".autotmp_0108/&type.[]interface {}"".autotmp_0107otype.string"".autotmp_0106O(type.[1]interface {}"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry€»ÿ€àøŽ vn[ Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).PanicfÀ¦eH‹ %H;awèëêHì€H‹¬$ˆH‹m¶](€û‚H‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$H‹œ$°H‰\$ 
èH‹\$(H‰\$HH‹\$0H‰\$PH\$XHÇHÇCH\$XHƒû„šHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èH‹œ$ˆH‰$H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$èHĀÉé_ÿÿÿ + 0runtime.morestack_noctxtîfmt.Sprintf¤type.stringÊruntime.convT2E¢2runtime.writebarrieriface€""".(*Entry).Panic`€"".autotmp_0118"type.interface {}"".autotmp_0116/&type.[]interface {}"".autotmp_0115otype.string"".autotmp_0114O(type.[1]interface {}"".args0&type.[]interface {}"".formattype.string"".entrytype.*"".Entry€®ÿ€à†Žvn[!Tgclocals·e8d3240594e259421cd655d317fed5feTgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Debugln€îeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„—HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHƒÄxÉébÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ""".(*Entry).Debug@ð "".autotmp_0126"type.interface {}"".autotmp_0124/&type.[]interface {}"".autotmp_0123otype.string"".autotmp_0122O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð•ïðÀ–"ù]n[Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).Infoln€îeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„—HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHƒÄxÉébÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ "".(*Entry).Info@ð "".autotmp_0134"type.interface {}"".autotmp_0132/&type.[]interface 
{}"".autotmp_0131otype.string"".autotmp_0130O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð•ïðÀ¢"ù]n[Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Println –eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‰\$H‹\$8H‰\$H‹\$@H‰\$èHƒÄ à + 0runtime.morestack_noctxt„$"".(*Entry).Infoln@@"".args&type.[]interface {}"".entrytype.*"".Entry@0?P®, + +ATgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Entry).Warnln€îeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„—HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHƒÄxÉébÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ "".(*Entry).Warn@ð "".autotmp_0142"type.interface {}"".autotmp_0140/&type.[]interface {}"".autotmp_0139otype.string"".autotmp_0138O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð•ïðÀ¶"ù]n[Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ*"".(*Entry).Warningln –eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‰\$H‹\$8H‰\$H‹\$@H‰\$èHƒÄ à + 0runtime.morestack_noctxt„$"".(*Entry).Warnln@@"".args&type.[]interface {}"".entrytype.*"".Entry@0?PÂ, + 
+ATgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Errorln€îeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„—HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHƒÄxÉébÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ""".(*Entry).Error@ð "".autotmp_0150"type.interface {}"".autotmp_0148/&type.[]interface {}"".autotmp_0147otype.string"".autotmp_0146O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð•ïðÀÊ"ù]n[Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Fatalln ˆeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„¤HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHÇ$èHƒÄxÉéUÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ""".(*Entry).Fatalèos.Exit@ð "".autotmp_0158"type.interface {}"".autotmp_0156/&type.[]interface {}"".autotmp_0155otype.string"".autotmp_0154O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð¢ïðÐÖ"ù ]n[ Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ&"".(*Entry).Panicln€îeH‹ %H;awèëêHƒìxH‹„$€H‹(¶](€û‚ùH‰$H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$èH‹\$ 
H‰\$@H‹\$(H‰\$HH\$PHÇHÇCH\$PHƒû„—HÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$@H‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$0H‰L$H‰D$8H‰D$èH‹œ$€H‰$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$èHƒÄxÉébÿÿÿ + 0runtime.morestack_noctxt¼*"".(*Entry).sprintlnnòtype.string˜runtime.convT2Eð2runtime.writebarrierifaceÎ""".(*Entry).Panic@ð "".autotmp_0166"type.interface {}"".autotmp_0164/&type.[]interface {}"".autotmp_0163otype.string"".autotmp_0162O(type.[1]interface {}"".args&type.[]interface {}"".entrytype.*"".Entryð•ïðÀä"ù]n[Tgclocals·b29a376724b9675f7c9e576a6dabc1e0Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ*"".(*Entry).sprintlnn€òeH‹ %H;awèëêHƒì(HÇD$PHÇD$XH‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$èH‹t$H‹L$ H‰ÊHÿÊH9ÑrH‰t$PH‰T$XHƒÄ(Ãè  + 0runtime.morestack_noctxt”fmt.Sprintlnæ$runtime.panicslice`P"".autotmp_0172type.int "".~r1@type.string"".args&type.[]interface {}"".entrytype.*"".EntryPWOP€ø,,( +I7Tgclocals·9f0d5ba6770c4a1ed4fa771547e96df1Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ""".StandardLogger H‹H‰\$à "".std "".~r0type.*"".LoggerTgclocals·a7a3692b8e27e823add69ec4239ba55fTgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".SetOutput ŒeH‹ %H;awèëêHƒìH‹H‰$Hƒ<$„ŠHƒ$,èH‹H‰$Hƒ<$teHƒ$,H QjèYYH…Àu?H‹H‰$Hƒ<$t$H‹\$ H‰\$H‹\$(H‰\$èèHƒÄÉ%ëÓèHƒÄÉ%ë’‰%éjÿÿÿ + 0runtime.morestack_noctxt: "".stdl$sync.(*Mutex).Lockz "".std¨.sync.(*Mutex).Unlock·f¸"runtime.deferprocÔ "".stdœ2runtime.writebarrieriface¨&runtime.deferreturnÐ&runtime.deferreturn 0 "".outtype.io.Writer*0?:/0/0Ð"$ -+   
5›Tgclocals·20671cc48303dfd2b9d73bba3d1850b7Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".SetFormatter –eH‹ %H;awèëêHƒìH‹H‰$Hƒ<$„Hƒ$,èH‹H‰$Hƒ<$tjHƒ$,H QjèYYH…ÀuDH‹H‰$Hƒ<$t)Hƒ$H‹\$ H‰\$H‹\$(H‰\$èèHƒÄÉ%ëÎèHƒÄÉ%ë‰%éeÿÿÿ + 0runtime.morestack_noctxt: "".stdl$sync.(*Mutex).Lockz "".std¨.sync.(*Mutex).Unlock·f¸"runtime.deferprocÔ "".std¦2runtime.writebarrieriface²&runtime.deferreturnÚ&runtime.deferreturn 0"".formatter"type."".Formatter*0??/0/0Ð"2 -0   5›Tgclocals·20671cc48303dfd2b9d73bba3d1850b7Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".SetLevelÀ¶eH‹ %H;awèëêHƒìH‹H‰$Hƒ<$tfHƒ$,èH‹H‰$Hƒ<$tAHƒ$,H QjèYYH…ÀuH‹¶l$@ˆk(èHƒÄÃèHƒÄÉ%붉%ë‘ + 0runtime.morestack_noctxt: "".stdd$sync.(*Mutex).Lockr "".std .sync.(*Mutex).Unlock·f°"runtime.deferprocÌ "".stdê&runtime.deferreturn€&runtime.deferreturn"".leveltype."".Level*; + @-  +1oTgclocals·5d05a78f811f5c3f62710534cdce0004Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".GetLevelÀÀeH‹ %H;awèëêHƒìÆD$H‹H‰$Hƒ<$tfHƒ$,èH‹H‰$Hƒ<$tAHƒ$,H QjèYYH…ÀuH‹¶k(@ˆl$èHƒÄÃèHƒÄÉ%붉%ë‘ + 0runtime.morestack_noctxtD "".stdn$sync.(*Mutex).Lock| "".stdª.sync.(*Mutex).Unlock·fº"runtime.deferprocÖ "".stdô&runtime.deferreturnŠ&runtime.deferreturn "".~r0type."".Level*@ + N-  +6jTgclocals·7c868751a5d2fdd881613692c78d6476Tgclocals·0115f8d53b75c1696444f08ad03251d9¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".AddHook€ìeH‹ %H;awèëêHƒìH‹H‰$Hƒ<$t~Hƒ$,èH‹H‰$Hƒ<$tYHƒ$,H QjèYYH…Àu3H‹H‹kH‰,$H‹\$ 
H‰\$H‹\$(H‰\$èèHƒÄÃèHƒÄÉ%랉%évÿÿÿ + 0runtime.morestack_noctxt: "".stdd$sync.(*Mutex).Lockr "".std .sync.(*Mutex).Unlock·f°"runtime.deferprocÌ "".stdŽ""".LevelHooks.Addš&runtime.deferreturn°&runtime.deferreturn 0"".hooktype."".Hook*0;7/0 +/0À\-(  1Tgclocals·20671cc48303dfd2b9d73bba3d1850b7Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".WithFieldàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ èH‹\$(H‰\$XHƒÄ0à + 0runtime.morestack_noctxt: "".stdœ,"".(*Logger).WithFieldP` "".~r2@type.*"".Entry"".value "type.interface {} "".keytype.string`F_p +rV +M#Tgclocals·66ae2244d17a3b89653cba445a520071Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".WithFields †eH‹ %H;awèëêHƒìH‹H‰$H‹\$ H‰\$èH‹\$H‰\$(HƒÄà + 0runtime.morestack_noctxt: "".std`."".(*Logger).WithFields 0 "".~r1type.*"".Entry"".fieldstype."".Fields0(/P †6 +/!Tgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Debug šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ$"".(*Logger).Debug0@"".args&type.[]interface {}@2?P. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Print šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ$"".(*Logger).Print0@"".args&type.[]interface {}@2?Pš. 
+C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Info šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ""".(*Logger).Info0@"".args&type.[]interface {}@2?P¤. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Warn šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ""".(*Logger).Warn0@"".args&type.[]interface {}@2?P®. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Warning šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Warning0@"".args&type.[]interface {}@2?P¸. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Error šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ$"".(*Logger).Error0@"".args&type.[]interface {}@2?PÂ. 
+C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Panic šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ$"".(*Logger).Panic0@"".args&type.[]interface {}@2?PÌ. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Fatal šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ$"".(*Logger).Fatal0@"".args&type.[]interface {}@2?PÖ. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".DebugfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°&"".(*Logger).DebugfP`"".args &type.[]interface {}"".formattype.string`F_pàB +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".PrintfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°&"".(*Logger).PrintfP`"".args &type.[]interface {}"".formattype.string`F_pêB +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".InfofàÂeH‹ 
%H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°$"".(*Logger).InfofP`"".args &type.[]interface {}"".formattype.string`F_pôB +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".WarnfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°$"".(*Logger).WarnfP`"".args &type.[]interface {}"".formattype.string`F_pþB +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".WarningfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°*"".(*Logger).WarningfP`"".args &type.[]interface {}"".formattype.string`F_pˆB +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".ErrorfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°&"".(*Logger).ErrorfP`"".args &type.[]interface {}"".formattype.string`F_p’B +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".PanicfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°&"".(*Logger).PanicfP`"".args &type.[]interface {}"".formattype.string`F_pœB 
+WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".FatalfàÂeH‹ %H;awèëêHƒì0H‹H‰$H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(èHƒÄ0à + 0runtime.morestack_noctxt: "".std°&"".(*Logger).FatalfP`"".args &type.[]interface {}"".formattype.string`F_p¦B +WTgclocals·f271231f400e778e0f59be25f7a26a56Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Debugln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Debugln0@"".args&type.[]interface {}@2?P°. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Println šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Println0@"".args&type.[]interface {}@2?Pº. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Infoln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ&"".(*Logger).Infoln0@"".args&type.[]interface {}@2?PÄ. 
+C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Warnln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ&"".(*Logger).Warnln0@"".args&type.[]interface {}@2?PÎ. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Warningln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ,"".(*Logger).Warningln0@"".args&type.[]interface {}@2?PØ. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Errorln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Errorln0@"".args&type.[]interface {}@2?Pâ. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Panicln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Panicln0@"".args&type.[]interface {}@2?Pì. 
+C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ"".Fatalln šeH‹ %H;awèëêHƒì H‹H‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 0runtime.morestack_noctxt: "".stdˆ("".(*Logger).Fatalln0@"".args&type.[]interface {}@2?Pö. +C Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ*"".prefixFieldClashes€ æ eH‹ %H;awèëêHƒì`HH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹L$ ¶\$(Hƒù„€û„¢HH‹+H‰l$@H‹kH‰l$HHH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹\$ Hƒû„OH‹+H‰l$0H‹kH‰l$8HH‰$H‹\$hH‰\$H\$@H‰\$H\$0H‰\$èHH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹L$ ¶\$(Hƒù„¹€û„¢HH‹+H‰l$@H‹kH‰l$HHH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹\$ Hƒû„FH‹+H‰l$0H‹kH‰l$8HH‰$H‹\$hH‰\$H\$@H‰\$H\$0H‰\$èHH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹L$ ¶\$(Hƒù„°€û„žHH‹+H‰l$@H‹kH‰l$HHH‹ H‹kHH‰$H‹\$hH‰\$H‰L$PH‰L$H‰l$XH‰l$èH‹\$ HƒûtDH‹+H‰l$0H‹kH‰l$8HH‰$H‹\$hH‰\$H\$@H‰\$H\$0H‰\$èHƒÄ`É븉éIÿÿÿ‰é³þÿÿ‰é@þÿÿ‰éªýÿÿ‰é7ýÿÿ: + 0runtime.morestack_noctxt: go.string."time"Vtype."".Fields¤4runtime.mapaccess2_faststrì.go.string."fields.time"œ go.string."time"¸type."".Fields†4runtime.mapaccess1_faststrÔtype."".Fields¢$runtime.mapassign1°go.string."msg"Ìtype."".Fieldsš4runtime.mapaccess2_faststrâ,go.string."fields.msg"’go.string."msg"®type."".Fieldsü4runtime.mapaccess1_faststrÊtype."".Fields˜$runtime.mapassign1¦"go.string."level"Âtype."".Fields 4runtime.mapaccess2_faststrØ 0go.string."fields.level"ˆ +"go.string."level"¤ +type."".Fieldsò +4runtime.mapaccess1_faststr¸ type."".Fields† $runtime.mapassign1À"".autotmp_0193$type.*interface {}"".autotmp_0192$type.*interface {}"".autotmp_0190"type.interface 
{}"".autotmp_0189type.string"".autotmp_0188type.string"".autotmp_0187type.string"".autotmp_0186"type.interface {}"".autotmp_0185type.string"".autotmp_0184type.string"".autotmp_0183type.string"".autotmp_0182_"type.interface {}"".autotmp_0181type.string"".autotmp_0180?type.string"".autotmp_0179type.string"".datatype."".FieldsÀñ¿À4ÀXDP ¢P ¢P ž*QqN<qN<qJ>Tgclocals·15395a9df917b4c9aa74d5c6c7e1ebf4Tgclocals·cfe802ef097eb87dc1d2f379757036b4º/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.goþ""".LevelHooks.AddÀ¸eH‹ %HD$¨H;AwèëåHìØH‹œ$ðH‰$H‹œ$èH‹[(ÿÓH‹T$H‹D$H‹L$H‰”$ÀH‰„$ÈH‰Œ$ÐH‰Œ$¸1ÉH‰„$°H‰D$HH‰”$¨H‰ÐH‹l$HH9ézH‰D$X¶(H‰L$P@ˆl$G@ˆl$FHH‰$H‹œ$àH‰\$H\$FH‰\$èH‹\$Hƒû„5H‹H‹KH‹[H‰”$H‰Œ$˜H‰œ$ H‰ØH)ËHƒû}FHH‰$H‰T$`H‰T$H‰L$hH‰L$H‰D$pH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$hH‰D$pH‰ÓH‰T$`H‰ÍHkíHëH‰$H‹œ$èH‰\$H‹œ$ðH‰\$èH‹\$`H‰\$xH‹\$hH‰œ$€H‹\$pH‰œ$ˆHH‰$H‹œ$àH‰\$H\$GH‰\$H\$xH‰\$èH‹D$XHÿÀH‹L$PHÿÁH‹l$HH9錆þÿÿHÄØÉéÄþÿÿ +*0runtime.morestack_noctxtt +Ú$type."".LevelHooksš$runtime.mapaccess1¤type.[]"".Hook„"runtime.growslice¬2runtime.writebarrieriface‚$type."".LevelHooksÖ$runtime.mapassign10°"".autotmp_0204ïtype.[]"".Hook"".autotmp_0202ÿtype.*"".Level"".autotmp_0201Ÿtype.int"".autotmp_0200type.int"".autotmp_0199¿type.[]"".Hook"".autotmp_0198type.[]"".Hook"".autotmp_0197£type."".Level"".autotmp_0196¡type."".Level"".autotmp_0195_type.[]"".Level"".autotmp_0194/type.[]"".Level"".hooktype."".Hook"".hooks$type."".LevelHooks"°ò¯°  &"~Ï :’ÉU6Tgclocals·a02efc190d1c7709e4c72531a85b968dTgclocals·5347b08d42ef15c0183233bde05091ab²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.goþ$"".LevelHooks.Fire œeH‹ %HD$àH;AwèëåHì HÇ„$ÀHÇ„$ȶœ$°ˆ\$'HH‰$H‹œ$¨H‰\$H\$'H‰\$èH‹\$Hƒû„ H‹H‹CH‹kH‰”$ˆH‰„$H‰¬$˜H‰¬$€1ÉH‰D$xH‰D$(H‰T$pH‰ÐH‹l$(H9é—H‰D$8Hƒø„¨H‹H‹hH‰L$0H‰T$`H‰l$hH‹œ$¸H‰\$H‰l$HH‰,$H‰T$@H‹Z 
ÿÓH‹L$H‹D$H‰D$XHƒùH‰L$PtH‰Œ$ÀH‰„$ÈHÄ ÃH‹D$8H‹L$0HƒÀHÿÁH‹l$(H9éŒiÿÿÿHÇ„$ÀHÇ„$ÈHĠÉéQÿÿÿ‰éîþÿÿ + +*0runtime.morestack_noctxt’$type."".LevelHooksÒ$runtime.mapaccess1š +PÀ"".autotmp_0219type."".Hook"".autotmp_0218Ïtype.*"".Hook"".autotmp_0217ïtype.int"".autotmp_0216ßtype.int"".autotmp_0214_type.[]"".Hook"".autotmp_0213/type.[]"".Hook"".autotmp_0212ñtype."".Level "".errŸtype.error"".hook¿type."".Hook "".~r20type.error"".entry type.*"".Entry"".leveltype."".Level"".hooks$type."".LevelHooks&"Àž¿À>¿À 6:´; h¥ƒTgclocals·7ce35767da505d40dfb8f85871f02969Tgclocals·7e4aab61b173caafc98b406c57151fa1²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.goþ4"".(*JSONFormatter).Format€øeH‹ %H„$ðþÿÿH;AwèëâHìHÇ„$¨HÇ„$°HÇ„$¸HÇ„$ÀHÇ„$ÈH‹¬$ H‹]HƒûtH‹H‰ØHH‰$H‰ÃHƒÃH‰\$èH‹\$H‰\$@H‹œ$ H‹kH¼$@1ÀèHH‰$H‰l$Hœ$@H‰\$èH‹œ$@1íH9ë„H‹œ$HHƒû„´H‹ H‹CH‹œ$@Hƒû„”H‹+H‰¬$ˆH‹kH‰¬$H‰L$HH‰D$PH‰Œ$¨H‰ $H‰„$°H‰D$è‹\$HH‰$H‹œ$¨H‰\$H‹œ$°H‰\$èH‹´$ˆH‹¬$H‹T$H‰T$hH‹L$ H‰L$p¶\$(€û„‚H‰´$èH‰¬$ðH‰ $H‹Z ÿÓH‹\$H‰œ$ØH‹\$H‰œ$àHH‰$Hœ$ØH‰\$èH‹\$H‰œ$ÈH‹\$H‰œ$ÐHH‰$H‹\$@H‰\$Hœ$èH‰\$Hœ$ÈH‰\$èHœ$@H‰$èH‹œ$@1íH9ë…sþÿÿH‹\$@H‰$èH‹œ$˜Hƒû„˜H‹ H‹CHƒøuHH‹ H‹CHH‹+H‰¬$èH‹sH‰´$ðH‹¼$ Hƒÿ„FHwH<$H¥H¥H¥H‰L$xH‰L$H‰„$€H‰D$ èH‹\$(H‰œ$ØH‹\$0H‰œ$àHH‰$Hœ$ØH‰\$èH‹\$H‰œ$ÈH‹\$H‰œ$ÐHH‰$H‹\$@H‰\$Hœ$èH‰\$Hœ$ÈH‰\$èHH‹+H‰¬$èH‹kH‰¬$ðHH‰$H‹œ$ H‰\$Hƒ|$„IHƒD$0èH‹\$H‰œ$ÈH‹\$H‰œ$ÐHH‰$H‹\$@H‰\$Hœ$èH‰\$Hœ$ÈH‰\$èHH‹+H‰¬$èH‹kH‰¬$ðH‹œ$ ¶k(@ˆ,$èH‹\$H‰œ$ØH‹\$H‰œ$àHH‰$Hœ$ØH‰\$èH‹\$H‰œ$ÈH‹\$H‰œ$ÐHH‰$H‹\$@H‰\$Hœ$èH‰\$Hœ$ÈH‰\$èH‹D$@H H‰Œ$¨H‰ $H‰„$°H‰D$èH‹l$H‰¬$øH‹L$H‰Œ$H‹D$ H‰„$H‹T$(H‹t$0H‰´$ HƒúH‰”$˜„ Hœ$¸HÇHÇCHœ$¸Hƒû„ðHÇÅHÇÁH‰œ$(H‰¬$0H‰Œ$8H‰$H‰t$èH‹L$H‹D$H‹œ$(H‰$H‰Œ$¨H‰L$H‰„$°H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$(H‰\$H‹œ$0H‰\$H‹œ$8H‰\$ èH‹L$(H‹D$0HÇ„$¨HÇ„$°HÇ„$¸H‰Œ$ÀH‰„$ÈHÄÉé ÿÿÿH‰êH‰ÃH)ËHƒû}OHH‰$H‰”$H‰T$H‰Œ$H‰L$H‰„$ H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÏHÿÇH +Æ +H‰”$¨H‰¼$°H‰„$¸HÇ„$ÀHÇ„$ÈHÄÉ%é«üÿÿ‰é³ûÿÿ‰éaûÿÿH‹Œ$¨H‹„$°H‰´$èH‰¬$ðH‰L$XH‰Œ$ÈH‰D$`H‰„$ÐHH‰$H‹\$@H‰\$Hœ$èH‰\$Hœ$ÈH‰\$èéªúÿÿ‰éeùÿÿ‰éEùÿÿZ 
+00runtime.morestack_noctxtøtype."".Fields¢runtime.makemapìØ runtime.duffzeroútype."".Fields°&runtime.mapiterinitª$runtime.efacethashÀtype.error†$runtime.assertE2I2¢ +àtype.stringŒruntime.convT2EÎtype."".Fields¨ $runtime.mapassign1Ê &runtime.mapiternextŒ +*"".prefixFieldClashesØ +Jgo.string."2006-01-02T15:04:05Z07:00"ô + go.string."time"š  time.Time.FormatÜ type.stringˆ runtime.convT2EÊ type."".Fields¤$runtime.mapassign1²go.string."msg"îtype.string¾runtime.convT2E€type."".FieldsÚ$runtime.mapassign1è"go.string."level"À"".Level.String‚type.string®runtime.convT2Eðtype."".FieldsÊ$runtime.mapassign1âtype."".Fieldsž*encoding/json.Marshalîruntime.convI2EØ2runtime.writebarrierifaceæ`go.string."Failed to marshal fields to JSON, %v"Úfmt.Errorf type.[]uint8’"runtime.growsliceðtype."".FieldsÊ$runtime.mapassign1p J"".autotmp_0250type.int"".autotmp_0249type.int"".autotmp_0248ÿtype.[]uint8"".autotmp_0247"type.interface {}"".autotmp_0245Ï&type.[]interface {}"".autotmp_0244type.uint32"".autotmp_0242"type.interface {}"".autotmp_0241Ï"type.interface {}"".autotmp_0238¯(type.[1]interface {}"".autotmp_0237type."".Fields"".autotmp_0236"type.interface {}"".autotmp_0235type.string"".autotmp_0234type.string"".autotmp_0233"type.interface {}"".autotmp_0232type.string"".autotmp_0231"type.interface {}"".autotmp_0230type.string"".autotmp_0229type.string"".autotmp_0228"type.interface {}"".autotmp_0227type.string"".autotmp_0226"type.interface {}"".autotmp_0225ïtype.string"".autotmp_0224Ïtype.string"".autotmp_0223ŸBtype.map.iter[string]interface {}"".autotmp_0222type."".Fields "".errïtype.error"".serialized¯type.[]uint8$"".timestampFormat¯type.string"".vï"type.interface {}"".vÏtype.error"".v"type.interface {}"".ktype.string"".dataŸtype."".Fields "".~r2Ptype.error "".~r1 type.[]uint8"".entrytype.*"".Entry"".f,type.*"".JSONFormatter*% Ñ Ÿ ©Ÿ ŸÀna>’ˆŸ $ß›¸c £   ^ dG}}4N! 
‡7NMN37N*¨vœÜ Tgclocals·91781b467bdd49442cfecbf49067c104Tgclocals·859165c97b106654e8e33715962a8293Ä/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.goþ "".New ˜eH‹ %H;awèëêHƒìXHH‰$èH‹\$H‰\$0HH‰$HÇD$èH‹\$H‰\$(HH‰$èH‹L$H‰ÏH‰L$ Hƒù„G1ÀèH‹1íH9è„÷H‰ $Hƒ<$„ÜH‹ H‰D$HH‰D$H‰L$PH‰L$èH‹1íH9èt|H‹\$ H‰$Hƒ<$tcHƒ$H‹L$0H‰D$8H‰D$H‰L$@H‰L$èH‹\$ H‰$Hƒ<$t'Hƒ$H‹\$(H‰\$èH‹D$ Æ@(H‰D$`HƒÄXÉ%ëЉ%ë”HH‰$HH‰\$HH‰\$èH‹D$éRÿÿÿ‰%éÿÿÿHH‰$HH‰\$HH‰\$èH‹L$ H‹D$éÒþÿÿ‰é²þÿÿ. + 0runtime.morestack_noctxt:*type."".TextFormatterL"runtime.newobjectn$type."".LevelHooks’runtime.makemap´type."".LoggerÆ"runtime.newobject‚ä runtime.duffzero4go.itab.*os.File.io.WriterÒos.Stderr„2runtime.writebarrieriface’Lgo.itab.*"".TextFormatter."".Formatter†2runtime.writebarrierifaceÎ.runtime.writebarrierptr¦,type.*"".TextFormatter¼"type."".FormatterÔLgo.itab.*"".TextFormatter."".Formatterè runtime.typ2Itab¢type.*os.File¸type.io.WriterÐ4go.itab.*os.File.io.Writerä runtime.typ2Itab° +"".autotmp_0265type.*uint8"".autotmp_0263otype.*"".Logger"".autotmp_0262_$type."".LevelHooks"".autotmp_0261O,type.*"".TextFormatter "".~r0type.*"".Logger°£¯°’Ð:\#./` +-2"%#_A$M]Tgclocals·e9c510091732f30fce387c9f1e977134Tgclocals·87b2493d48b5216b7adc3dac672105cb´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ,"".(*Logger).WithFieldÀ°eH‹ %H;awèëêHƒìHH‹\$PH‰\$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ èH‹\$(H‰\$xHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxtNtype."".Fieldsrruntime.makemap”type."".Entry¦"runtime.newobjectØà 
runtime.duffzerož.runtime.writebarrierptræ.runtime.writebarrierptrÒ*"".(*Entry).WithField`"".autotmp_0273type.*"".Entry"".autotmp_0272type.*"".Entry"".autotmp_0271type.*"".Entry"".autotmp_0270type."".Fields"".logger/type.*"".Logger "".~r2Ptype.*"".Entry"".value0"type.interface {} "".keytype.string"".loggertype.*"".Loggerá$  t†8<$68Tgclocals·0a89f44bdb06c71b1e3fde611d9babf4Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ."".(*Logger).WithFields€æeH‹ %H;awèëêHƒì0H‹\$8H‰\$HH‰$HÇD$èH‹\$H‰\$(HH‰$èH‹L$H‰ÏHƒù„ƒ1ÀèH‰L$ H‰ $Hƒ<$tcH‹\$H‰\$èH‹\$ H‰$Hƒ<$t;Hƒ$H‹\$(H‰\$èH‹\$ H‰$H‹\$@H‰\$èH‹\$H‰\$HHƒÄ0É%뼉%딉évÿÿÿ + 0runtime.morestack_noctxtNtype."".Fieldsrruntime.makemap”type."".Entry¦"runtime.newobjectØà runtime.duffzero–.runtime.writebarrierptrÞ.runtime.writebarrierptrŽ,"".(*Entry).WithFields0`"".autotmp_0277type.*"".Entry"".autotmp_0276type.*"".Entry"".autotmp_0275type.*"".Entry"".autotmp_0274type."".Fields"".logger/type.*"".Logger "".~r1 type.*"".Entry"".fieldstype."".Fields"".loggertype.*"".Logger`¿_`&€€æ88$:Tgclocals·ab01a2d55089ff50c402006df1039c39Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).DebugfàÊeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€$"".(*Entry).Debugf`"".autotmp_0280type.*"".Entry"".autotmp_0279type.*"".Entry"".autotmp_0278type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°ˆ 
Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).InfofàÊeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€""".(*Entry).Infof`"".autotmp_0283type.*"".Entry"".autotmp_0282type.*"".Entry"".autotmp_0281type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°” Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).PrintfÀ°eH‹ %H;awèëêHƒìHH‹\$PH‰\$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxtNtype."".Fieldsrruntime.makemap”type."".Entry¦"runtime.newobjectØà runtime.duffzerož.runtime.writebarrierptræ.runtime.writebarrierptræ$"".(*Entry).Printf`"".autotmp_0286type.*"".Entry"".autotmp_0285type.*"".Entry"".autotmp_0284type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerá$  Ý$8<$@.Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).WarnfàÊeH‹ 
%H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€""".(*Entry).Warnf`"".autotmp_0289type.*"".Entry"".autotmp_0288type.*"".Entry"".autotmp_0287type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°¨ Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ*"".(*Logger).WarningfàÊeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€""".(*Entry).Warnf`"".autotmp_0292type.*"".Entry"".autotmp_0291type.*"".Entry"".autotmp_0290type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°´ Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).ErrorfàÊeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà 
runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€$"".(*Entry).Errorf`"".autotmp_0295type.*"".Entry"".autotmp_0294type.*"".Entry"".autotmp_0293type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°À Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).Fatalf€äeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„µ1ÀèH‰L$8H‰ $Hƒ<$„ŽH‹\$0H‰\$èH‹\$8H‰$Hƒ<$tfHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHÇ$èHƒÄHÉ%둉%éfÿÿÿ‰éDÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€$"".(*Entry).Fatalfšos.Exit`"".autotmp_0298type.*"".Entry"".autotmp_0297type.*"".Entry"".autotmp_0296type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerû*ÀÌ Ø *E<$@ATgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).PanicfàÊeH‹ %H;awèëêHƒìHH‹D$P¶X(€û‚ØH‰D$0HH‰$HÇD$èH‹\$H‰\$@HH‰$èH‹L$H‰ÏHƒù„¨1ÀèH‰L$8H‰ $Hƒ<$„H‹\$0H‰\$èH‹\$8H‰$Hƒ<$tYHƒ$H‹\$@H‰\$èH‹\$8H‰$H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ H‹\$xH‰\$(èHƒÄHÉ%랉%ésÿÿÿ‰éQÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero¸.runtime.writebarrierptr€.runtime.writebarrierptr€$"".(*Entry).Panicf`"".autotmp_0301type.*"".Entry"".autotmp_0300type.*"".Entry"".autotmp_0299type."".Fields"".logger/type.*"".Logger"".args0&type.[]interface {}"".formattype.string"".loggertype.*"".Loggerî'°Ú 
Ø'E<$@1Tgclocals·fc96ae191c2547955912928601e85959Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).Debug ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ""".(*Entry).Debug@p "".autotmp_0304type.*"".Entry"".autotmp_0303type.*"".Entry"".autotmp_0302type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopæ ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ""".(*Logger).Info ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ "".(*Entry).Info@p "".autotmp_0307type.*"".Entry"".autotmp_0306type.*"".Entry"".autotmp_0305type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopò ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).Print€úeH‹ %H;awèëêHƒì8H‹\$@H‰\$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ 
H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxtNtype."".Fieldsrruntime.makemap”type."".Entry¦"runtime.newobjectØà runtime.duffzero–.runtime.writebarrierptrÞ.runtime.writebarrierptr¶ "".(*Entry).Info@p "".autotmp_0310type.*"".Entry"".autotmp_0309type.*"".Entry"".autotmp_0308type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÉop€þÅ88$,&Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ""".(*Logger).Warn ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ "".(*Entry).Warn@p "".autotmp_0313type.*"".Entry"".autotmp_0312type.*"".Entry"".autotmp_0311type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖop† ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).Warning ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ "".(*Entry).Warn@p "".autotmp_0316type.*"".Entry"".autotmp_0315type.*"".Entry"".autotmp_0314type."".Fields"".logger/type.*"".Logger"".args&type.[]interface 
{}"".loggertype.*"".LoggerpÖop’ ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).Error ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ""".(*Entry).Error@p "".autotmp_0319type.*"".Entry"".autotmp_0318type.*"".Entry"".autotmp_0317type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopž ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).FatalÀ´eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tzH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tRHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHÇ$èHƒÄ8É%륉%ézÿÿÿ‰é\ÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ""".(*Entry).Fatalêos.Exit@p "".autotmp_0322type.*"".Entry"".autotmp_0321type.*"".Entry"".autotmp_0320type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".Loggerpãop" ª À "E8$,9Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ$"".(*Logger).Panic ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ 
H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ""".(*Entry).Panic@p "".autotmp_0325type.*"".Entry"".autotmp_0324type.*"".Entry"".autotmp_0323type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖop¸ ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).Debugln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ&"".(*Entry).Debugln@p "".autotmp_0328type.*"".Entry"".autotmp_0327type.*"".Entry"".autotmp_0326type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopÄ ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).Infoln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ$"".(*Entry).Infoln@p "".autotmp_0331type.*"".Entry"".autotmp_0330type.*"".Entry"".autotmp_0329type."".Fields"".logger/type.*"".Logger"".args&type.[]interface 
{}"".loggertype.*"".LoggerpÖopÐ ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).Println€úeH‹ %H;awèëêHƒì8H‹\$@H‰\$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxtNtype."".Fieldsrruntime.makemap”type."".Entry¦"runtime.newobjectØà runtime.duffzero–.runtime.writebarrierptrÞ.runtime.writebarrierptr¶&"".(*Entry).Println@p "".autotmp_0334type.*"".Entry"".autotmp_0333type.*"".Entry"".autotmp_0332type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÉop€ÜÅ88$,&Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ&"".(*Logger).Warnln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ$"".(*Entry).Warnln@p "".autotmp_0337type.*"".Entry"".autotmp_0336type.*"".Entry"".autotmp_0335type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopä ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ,"".(*Logger).Warningln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ 
H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ$"".(*Entry).Warnln@p "".autotmp_0340type.*"".Entry"".autotmp_0339type.*"".Entry"".autotmp_0338type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopð ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).Errorln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ&"".(*Entry).Errorln@p "".autotmp_0343type.*"".Entry"".autotmp_0342type.*"".Entry"".autotmp_0341type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖopü ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).FatallnÀ´eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tzH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tRHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHÇ$èHƒÄ8É%륉%ézÿÿÿ‰é\ÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ&"".(*Entry).Fatallnêos.Exit@p 
"".autotmp_0346type.*"".Entry"".autotmp_0345type.*"".Entry"".autotmp_0344type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".Loggerpãop" ˆ À "E8$,9Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ("".(*Logger).Panicln ”eH‹ %H;awèëêHƒì8H‹D$@¶X(€û‚ÀH‰D$ HH‰$HÇD$èH‹\$H‰\$0HH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$(H‰ $Hƒ<$tmH‹\$ H‰\$èH‹\$(H‰$Hƒ<$tEHƒ$H‹\$0H‰\$èH‹\$(H‰$H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$èHƒÄ8É%벉%늉élÿÿÿ + 0runtime.morestack_noctxthtype."".FieldsŒruntime.makemap®type."".EntryÀ"runtime.newobjectòà runtime.duffzero°.runtime.writebarrierptrø.runtime.writebarrierptrÐ&"".(*Entry).Panicln@p "".autotmp_0349type.*"".Entry"".autotmp_0348type.*"".Entry"".autotmp_0347type."".Fields"".logger/type.*"".Logger"".args&type.[]interface {}"".loggertype.*"".LoggerpÖop– ÀE8$,)Tgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·0442504e096b61648fffc20fe86cec66´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.goþ"".Level.StringÀ®¶\$€ûws€ûuHH‹+H‰l$H‹kH‰l$ÀûuHH‹+H‰l$H‹kH‰l$ÀûuHH‹+H‰l$H‹kH‰l$ÃHH‹+H‰l$H‹kH‰l$ÀûuHH‹+H‰l$H‹kH‰l$ÀûuHH‹+H‰l$H‹kH‰l$Àûu¦HH‹+H‰l$H‹kH‰l$Ã$"go.string."panic"`"go.string."fatal"œ"go.string."error"Î&go.string."unknown"Š&go.string."warning"Æ go.string."info"‚"go.string."debug"0 "".~r0type.string"".leveltype."".Levelàà:" +"Tgclocals·a73fd2a0c6f832642aa9216fd9c5e6beTgclocals·3280bececceccd33cb74587feedb1f9f´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.goþ"".ParseLevel ”eH‹ %HD$èH;AwèëåHì˜HÇ„$¸HÇ„$ÀH‹Œ$ H‹œ$¨H‰L$PH‰ $H‰\$XH‰\$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$XH‹\$ HƒûOHƒøu`H‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$X¶\$ €ût(Æ„$°HÇ„$¸HÇ„$ÀHĘÃHƒøu`H‰ 
$H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$X¶\$ €ût(Æ„$°HÇ„$¸HÇ„$ÀHĘÃHƒøuVH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €ût(Æ„$°HÇ„$¸HÇ„$ÀHĘÃÆD$?H‹œ$ H‰\$`H‹œ$¨H‰\$hH\$pHÇHÇCH\$pHƒû„ÞHÇÂHÇÁH‰œ$€H‰”$ˆH‰Œ$HH‰$H\$`H‰\$èH‹L$H‹D$H‹œ$€H‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ èH‹L$(H‹D$0¶\$?ˆœ$°H‰Œ$¸H‰„$ÀHĘÉéÿÿÿH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$XH‹\$ HƒûÊHƒøu`H‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$X¶\$ €ût(Æ„$°HÇ„$¸HÇ„$ÀHĘÃHƒø…,þÿÿH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €û„úýÿÿÆ„$°HÇ„$¸HÇ„$ÀHĘÃHƒøu`H‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$PH‹D$X¶\$ €ût(Æ„$°HÇ„$¸HÇ„$ÀHĘÃHƒø…býÿÿH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €ûu é/ýÿÿ2 +*0runtime.morestack_noctxtÀ"go.string."fatal"è"runtime.cmpstringÆ"go.string."debug"î runtime.eqstring’"go.string."error"º runtime.eqstringÞ"go.string."fatal"† runtime.eqstringÈtype.stringîruntime.convT2EÌ 2runtime.writebarrierifaceÚ Pgo.string."not a valid logrus Level: %q"Î +fmt.ErrorfØ "go.string."panic"€ "runtime.cmpstringÞ  go.string."info"†  runtime.eqstring²"go.string."panic"Ú runtime.eqstringò go.string."warn"š runtime.eqstringÆ&go.string."warning"î runtime.eqstringP°"".autotmp_0357¯"type.interface {}"".autotmp_0355/&type.[]interface {}"".autotmp_0354type.string"".autotmp_0352otype.string"".autotmp_0351O(type.[1]interface {}"".l±type."".Level "".~r20type.error "".~r1 type."".Level "".lvltype.stringf"°Ô¯°e¯°[¯°Ÿ¯°©¯°c¯°e¯°C PH:W>( >(4(¢=>(<( +>(8 ,s´pY­`jTgclocals·db0f6b31ff49b3f025910ec03f9742faTgclocals·626b2db390378ab5b89c88b48426687f´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.goþ"".IsTerminalÀ´eH‹ %HD$èH;AwèëåHì˜H‹H|$P1ÀèHÇ$6H‰\$HÇD$tH@H\$PH‰\$HÇD$ HÇD$(HÇD$0èH‹\$HHƒûtÆ„$ HĘÃÆ„$ ëî + +*0runtime.morestack_noctxtJsyscall.StdoutbÜ runtime.duffzeroâ syscall.Syscall6°"".termiostype."".Termios "".~r0type.bool"°m¯° "" E& 
+p0Tgclocals·a7a3692b8e27e823add69ec4239ba55fTgclocals·3280bececceccd33cb74587feedb1f9fÎ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.goþ"".init·1€êeH‹ %H;awèëêHƒì@èH‹$‹L$H‹D$HH‰$HÇD$H‰T$(H‰T$‰L$0‰L$H‰D$8H‰D$ èè¶$ˆHƒÄ@à + 0runtime.morestack_noctxt6time.Now^ "".baseTimestampº0runtime.writebarrierfat3Ä"".IsTerminalØ"".isTerminal€"".autotmp_0361/type.time.Time€Z €6G +fTgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·0528ab8f76149a707fd2f0025c2178a3Ä/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþ"".miniTSàÚeH‹ %H;awèëêHƒì H‹H‰$‹‰\$H‹H‰\$èH‹l$I¹³”Ö&è .H‰èI÷éI‰ÐIÁøHÁý?I)èL‰D$(HƒÄ à + 0runtime.morestack_noctxt: "".baseTimestampN "".baseTimestampd "".baseTimestampxtime.Since@ "".~r0type.int@R?p +@V +;5Tgclocals·a7a3692b8e27e823add69ec4239ba55fTgclocals·3280bececceccd33cb74587feedb1f9fÄ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþ4"".(*TextFormatter).Format ŽeH‹ %H„$8ÿÿÿH;AwèëâHìHHÇ„$`HÇ„$hHÇ„$pHÇ„$xHÇ„$€H‹¬$XH‹]HƒûtH‹H‰ØHH‰$HÇD$H‰D$èH‹T$H‹L$ H‹D$(H‰”$°H‰Œ$¸H‰„$ÀH‹œ$XH‹kH¼$ø1ÀèHH‰$H‰l$Hœ$øH‰\$èH‹œ$ø1íH9ë„2H‹œ$øHƒû„àH‹+H‰l$pH‹kH‰l$xH‹”$°H‹Œ$¸H‹œ$ÀH‰ØH)ËHƒû}OHH‰$H‰”$àH‰T$H‰Œ$èH‰L$H‰„$ðH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$èH‰„$ðH‰ÓH‰”$àH‰ÍHkíHëH‰$H‹\$pH‰\$H‹\$xH‰\$èH‹”$àH‹Œ$èH‹„$ðH‰”$°H‰Œ$¸H‰„$ÀHœ$øH‰$èH‹œ$ø1íH9ë…ÎþÿÿH‹¬$P¶]€ûu+H‹œ$°H‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èHH‰$èH‹|$H‰ûHƒÿ„[1ÀèH‰\$PH‹œ$XH‹kH‰,$èH‹”$XH‹„$P€=„HÇÁ¶€û…ê€ù…á1ÉH‹XHƒûuHhHH‰ïH‰ÞH¥H¥€ù„ÅH‰$H‹\$PH‰\$H‰T$H‹œ$°H‰\$H‹œ$¸H‰\$ H‹œ$ÀH‰\$(èH‹\$PH‰$ÆD$ +èH‹L$PH‹qH‹QH‹AH9ÂrVH‹ H‰×H)ÇH‰òH)ÂHƒút H‰ÃHËH‰ÙH‰Œ$`H‰¼$hH‰”$pHÇ„$xHÇ„$€HÄHÃè ¶X€û…·Hƒú„ÔHjH$H‰ßH‰îH¥H¥H¥HhH\$H‰ßH‰îH¥H¥èH‹\$(H‰œ$ H‹\$0H‰œ$¨HH‰$Hœ$ H‰\$èH\$Hl$ H‰ïH‰ÞH¥H¥H‹œ$PH‰$H‹t$PH‰t$H5Hl$H‰ïH¥H¥èH‹”$X¶j(@ˆ,$èH‹\$H‰œ$ 
H‹\$H‰œ$¨HH‰$Hœ$ H‰\$èH\$Hl$ H‰ïH‰ÞH¥H¥H‹œ$PH‰$H‹t$PH‰t$H5Hl$H‰ïH¥H¥èHH‰$H‹œ$XH‰\$Hƒ|$„qHƒD$0èH\$Hl$ H‰ïH‰ÞH¥H¥H‹œ$PH‰$H‹t$PH‰t$H5Hl$H‰ïH¥H¥èH‹Œ$°H‹„$¸H‹œ$ÀH‰œ$Ø1ÒH‰„$ÐH‰D$@H‰Œ$ÈH‰ÈH‹l$@H9êwýÿÿH‰D$XHƒø„ÆH‹H‹@H‰T$HH‰L$`H‰D$hHH‰$H‹œ$XH‹kH‰l$H‰Œ$€H‰L$H‰„$ˆH‰D$èH‹\$ HƒûtfH‹ H‹kH‹œ$PH‰$H‹\$PH‰\$H‹\$`H‰\$H‹\$hH‰\$H‰Œ$H‰L$ H‰¬$˜H‰l$(èH‹D$XH‹T$HHƒÀHÿÂé!ÿÿÿ‰ë–‰é3ÿÿÿ‰%éƒþÿÿ‰é%ýÿÿ¶X€û…üÿÿHÇÁéüÿÿ1Ééêûÿÿ‰éžûÿÿ‰éúÿÿL +00runtime.morestack_noctxtøtype.[]string¦"runtime.makesliceªØ runtime.duffzero¸type."".Fieldsî&runtime.mapiterinit°type.[]string¢"runtime.growsliceÐ4runtime.writebarrierstringÒ&runtime.mapiternextð sort.Stringsþ "type.bytes.Buffer +"runtime.newobject +È runtime.duffzeroö +*"".prefixFieldClashes¢ "".isTerminal– Jgo.string."2006-01-02T15:04:05Z07:00"º @"".(*TextFormatter).printColoredà 2bytes.(*Buffer).WriteByteÂ$runtime.panicsliceÌ time.Time.FormatŽtype.stringºruntime.convT2Eœ go.string."time"¾D"".(*TextFormatter).appendKeyValueè"".Level.Stringªtype.stringÖruntime.convT2E¸"go.string."level"ÚD"".(*TextFormatter).appendKeyValueètype.string¸runtime.convT2Ešgo.string."msg"¼D"".(*TextFormatter).appendKeyValue¤type."".FieldsŒ4runtime.mapaccess1_faststrÂD"".(*TextFormatter).appendKeyValuep2"".autotmp_0386type.uint64"".autotmp_0385type.uint64"".autotmp_0383type.string"".autotmp_0382ßtype.*string"".autotmp_0381type.int"".autotmp_0380type.int"".autotmp_0375type.int"".autotmp_0374type.[]string"".autotmp_0373ï"type.interface {}"".autotmp_0372type.string"".autotmp_0371type.[]string"".autotmp_0370type.string"".autotmp_0369Ïtype.string"".autotmp_0368ÿtype.[]string"".autotmp_0367ŸBtype.map.iter[string]interface {}"".autotmp_0365Ïtype.[]string"".autotmp_0364ÿtype.int "".keyÏtype.string"".bï$type.*bytes.Buffer"".k¯type.string"".keys¯type.[]string "".~r2Ptype.error "".~r1 type.[]uint8"".entrytype.*"".Entry"".f,type.*"".TextFormatter%º°’xa]së$+.% + ?s ·†qc   P’dšWAOC¢„ E 
+7W7qê[pTgclocals·71f03f9031d1479950f850ff028ecf79Tgclocals·cfaa0e57cb7ad41983cddbe4e85a52caÄ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþ@"".(*TextFormatter).printColored€(è'eH‹ %H„$ÀþÿÿH;AwèëâHìÀH‹Œ$ضi(@€ý‡‡ @€ý…e HÇD$P¶i(@ˆ,$èH‹L$H‹D$H‰Œ$H‰ $H‰„$H‰D$èH‹t$PL‹„$ÈH‹”$ÐH‹L$H‹\$Hƒû‚÷HÇÀA¶X€û…ØH‰”$ˆH‰t$pH‰Œ$ H‰Œ$ðH‰„$¨H‰„$øèH‹$H‰\$hH‹H‰„$€1íH9è„IH¼$€1ÀèHœ$€Hƒû„!HÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$Hœ$ðH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$H\$hH‰\$èH‹L$H‹D$H‹œ$ HƒÃ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$H‹œ$ØH‰\$Hƒ|$„ÓHƒD$0èH‹L$H‹D$H‹œ$ HƒÃ0H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹Œ$ˆH‹„$€H‰„$ÀH‰$H‰Œ$ÈH‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$ H‹œ$(H‰\$(H‹œ$0H‰\$0èH‹Œ$àH‹„$èH‹œ$ðH‰œ$H1ÒH‰„$@H‰D$XH‰Œ$8H‰ÈH‹l$XH9ê…H‰D$xHƒø„ÆH‹H‹@H‰T$`H‰Œ$°H‰„$¸HH‰$H‹œ$ØH‹kH‰l$H‰Œ$H‰L$H‰„$H‰D$èH‹\$ Hƒû„YH‹ H‹kH‰Œ$H‰¬$˜H‹œ$ÐH‰œ$ˆH‹\$PH‰\$hH‹œ$°H‰œ$àH‹œ$¸H‰œ$èH‹H‰„$€1íH9è„´H¼$P1ÀèHœ$PHƒû„ŒHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$H\$hH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$Hœ$àH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹œ$ HƒÃ H‰$H‹œ$H‰\$H‹œ$˜H‰\$èH‹Œ$ˆH‹„$€H‰„$ÀH‰$H‰Œ$ÈH‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$ H‹œ$(H‰\$(H‹œ$0H‰\$0èH‹D$xH‹T$`HƒÀHÿÂH‹l$XH9êŒ{ýÿÿHÄÀÉémþÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰œ$€éþÿÿ‰é ýÿÿ‰é3ýÿÿ‰%é!üÿÿ‰éØúÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰œ$€é}úÿÿH‰”$ˆH‰t$pH‰Œ$ H‰Œ$ðH‰„$¨H‰„$øH‹¼$ØHƒÿ„ÄHoH<$H‰îH¥H¥H¥IhH\$H‰ßH‰îH¥H¥èH‹\$(H‰œ$àH‹\$0H‰œ$èH‹H‰„$€1íH9è„-H¼$€1ÀèHœ$€Hƒû„HÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$Hœ$ðH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$Hœ$àH‰\$èH‹L$H‹D$H‹œ$ HƒÃ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èHH‰$H‹œ$ØH‰\$Hƒ|$„´HƒD$0èH‹L$H‹D$H‹œ$ HƒÃ0H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹Œ$ˆH‹„$€H‰„$ÀH‰$H‰Œ$ÈH‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$ H‹œ$(H‰\$(H‹œ$0H‰\$0èéÙùÿÿ‰%é@ÿÿÿ‰éôýÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰œ$€é™ýÿÿ‰é5ýÿÿè @€ý„‘öÿÿHÇD$P"éŒöÿÿ@€ý„yöÿÿ@€ýuHÇD$P!énöÿÿ@€ýuÎHÇD$P%éZöÿÿ€ +00runtime.morestack_noctxt®"".Level.Stringþstrings.ToUpperÞ"".miniTSþ>go.itab.*bytes.Buffer.io.WriterÂà 
runtime.duffzeroÀtype.intæruntime.convT2EÐ2runtime.writebarrierifaceÞtype.stringŠruntime.convT2Eü2runtime.writebarrierifaceŠtype.int°runtime.convT2E¢ 2runtime.writebarrieriface° type.string€ +runtime.convT2Eò +2runtime.writebarrierifaceÒ Tgo.string."\x1b[%dm%s\x1b[0m[%04d] %-44s "È fmt.Fprintf¼type."".Fields¤4runtime.mapaccess1_faststrò>go.itab.*bytes.Buffer.io.Writer¶è runtime.duffzero´type.intÚruntime.convT2EÄ2runtime.writebarrierifaceÒtype.stringþruntime.convT2Eð2runtime.writebarrierifaceÎ2runtime.writebarrieriface®Bgo.string." \x1b[%dm%s\x1b[0m=%v"¤fmt.FprintfŽ$type.*bytes.Buffer¤type.io.Writer¼>go.itab.*bytes.Buffer.io.WriterÐ runtime.typ2ItabÄ$type.*bytes.BufferÚtype.io.Writerò>go.itab.*bytes.Buffer.io.Writer† runtime.typ2Itabú time.Time.Format¼>go.itab.*bytes.Buffer.io.Writer€à runtime.duffzeroþtype.int¤runtime.convT2EŽ2runtime.writebarrierifaceœtype.stringÈruntime.convT2Eº 2runtime.writebarrierifaceÈ type.stringô runtime.convT2Eæ!2runtime.writebarrierifaceô!type.stringÄ"runtime.convT2E¶#2runtime.writebarrieriface–$Pgo.string."\x1b[%dm%s\x1b[0m[%s] %-44s "Œ%fmt.FprintfÊ%$type.*bytes.Bufferà%type.io.Writerø%>go.itab.*bytes.Buffer.io.WriterŒ& runtime.typ2ItabÈ&$runtime.panicslice`€`"".autotmp_0437"type.interface {}"".autotmp_0436"type.interface {}"".autotmp_0434&type.[]interface {}"".autotmp_0433type.*uint8"".autotmp_0432type.string"".autotmp_0431type.*string"".autotmp_0430type.int"".autotmp_0429type.int"".autotmp_0428"type.interface {}"".autotmp_0427"type.interface {}"".autotmp_0426"type.interface {}"".autotmp_0425"type.interface {}"".autotmp_0424*type.*[4]interface {}"".autotmp_0423&type.[]interface {}"".autotmp_0422type.*uint8"".autotmp_0421"type.interface {}"".autotmp_0420"type.interface {}"".autotmp_0419"type.interface {}"".autotmp_0418ß"type.interface {}"".autotmp_0416¿&type.[]interface {}"".autotmp_0415ÿtype.*uint8"".autotmp_0412type.string"".autotmp_0411type.int"".autotmp_0410$type.*bytes.Buffer"".autotmp_0409ß(type.[3]interface 
{}"".autotmp_0408"type.interface {}"".autotmp_0407type.string"".autotmp_0406type.[]string"".autotmp_0405¿type.string"".autotmp_0404type.string"".autotmp_0403type.int"".autotmp_0402$type.*bytes.Buffer"".autotmp_0401(type.[4]interface {}"".autotmp_0400¯type.int"".autotmp_0399Ÿtype.string"".autotmp_0398Ÿtype.int"".autotmp_0397ï$type.*bytes.Buffer"".autotmp_0396(type.[4]interface {}"".autotmp_0395ÿtype.string"".autotmp_0394ßtype.string"".vß"type.interface {}"".kŸtype.string"".levelText¿type.string"".levelColorßtype.int"".keys0type.[]string"".entry type.*"".Entry"".b$type.*bytes.Buffer"".f,type.*"".TextFormatter%€× ÿ€ƒ€‚Â- + + +eçcqÚAM±T  +  +  NV˜„±®ÛºkV[ ´@]Tgclocals·40341f41d3f930f66258415e27eda61dTgclocals·3d6acc133f31046c5a57fcb873eb249fÄ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþ"".needsQuotingÀºeH‹ %H;awèëêHƒì@H‹\$HH‰\$0H‹\$PH‰\$81ÉH‰L$(H‹\$0H‰$H‹\$8H‰\$H‰L$èH‹L$‹D$ Hƒùt2ƒøa|ƒøz~ŃøA|ƒøZ~»ƒø0|ƒø9~±ƒø-t¬ƒø.t§ÆD$XHƒÄ@ÃÆD$XHƒÄ@à + 0runtime.morestack_noctxtœ&runtime.stringiter20€"".autotmp_0456/type.int"".autotmp_0455type.string "".~r1 type.bool"".texttype.string€x€  öG( +  +MSTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8fTgclocals·8d600a433c6aaa81a4fe446d95c5546bÄ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþD"".(*TextFormatter).appendKeyValueÀÀeH‹ %H„$xÿÿÿH;AwèëâHìH‹œ$H‰$H‹œ$ H‰\$H‹œ$(H‰\$èH‹œ$H‰$ÆD$=èH‹Œ$0H‹„$8H‰Œ$ÀH‰ $H‰„$ÈH‰D$è‹\$û´\ÿà…HH‰$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹T$H‰T$`H‹L$ H‰L$h¶\$(€û„ÐH‰$H‰L$èH‹¬$H‹T$`H‹L$h¶\$€ût1H‰,$H‰T$H‰L$èH‹œ$H‰$ÆD$ èHÄÃH‰l$XH‰”$ÐH‰Œ$ØH‹H‰D$P1íH9è„Hœ$àHÇHÇCHœ$àHƒû„âHÇÂHÇÁH‰œ$ðH‰”$øH‰Œ$HH‰$Hœ$ÐH‰\$èH‹L$H‹D$H‹œ$ðH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èH‹L$XH‹D$PH‰„$ H‰$H‰Œ$¨H‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ðH‰\$ H‹œ$øH‰\$(H‹œ$H‰\$0èé«þÿÿ‰éÿÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$Pé·þÿÿHH‰$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹T$H‰”$€H‹L$ H‰Œ$ˆ¶\$(€û„ÒH‰ $H‹Z ÿÓH‹L$H‹D$H‰Œ$H‰ 
$H‰„$˜H‰D$èH‹Œ$¶\$€ût(H‰ $H‹œ$H‰\$H‹œ$˜H‰\$èé¯ýÿÿH‰L$XH‹H‰D$P1íH9è„Hœ$àHÇHÇCHœ$àHƒû„ãHÇÁHÇÂH‰œ$ðH‰Œ$øH‰”$H‹œ$€H‰$H‹œ$ˆH‰\$èH‹L$H‹D$H‹œ$ðH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èH‹L$XH‹D$PH‰„$ H‰$H‰Œ$¨H‰L$HHl$H‰ïH‰ÞH¥H¥H‹œ$ðH‰\$ H‹œ$øH‰\$(H‹œ$H‰\$0èé‡üÿÿ‰éÿÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$Pé¶þÿÿH‹œ$ÀH‰\$pH‹œ$ÈH‰\$xH‹œ$H‰\$XH‹H‰D$P1íH9è„ÇHœ$àHÇHÇCHœ$àHƒû„—HÇÂHÇÁH‰”$øH‰Œ$H‰œ$ðH‰$H‹\$pH‰\$H‹\$xH‰\$èH‹L$XH‹D$PH‰„$ H‰$H‰Œ$¨H‰L$H‹œ$ðH‰\$H‹œ$øH‰\$H‹œ$H‰\$ èéKûÿÿ‰ébÿÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$PéÿÿÿR +00runtime.morestack_noctxt˜6bytes.(*Buffer).WriteStringÄ2bytes.(*Buffer).WriteByte $runtime.efacethashÎtype.string”$runtime.assertE2T2ô"".needsQuotingÒ6bytes.(*Buffer).WriteStringþ2bytes.(*Buffer).WriteByteÆ>go.itab.*bytes.Buffer.io.Writer’type.string¾runtime.convT2E¨2runtime.writebarrierifaceügo.string."%q"ò fmt.Fprintf˜ +$type.*bytes.Buffer® +type.io.WriterÆ +>go.itab.*bytes.Buffer.io.WriterÚ + runtime.typ2Itab† type.errorÌ $runtime.assertE2I2´ +€ "".needsQuotingê 6bytes.(*Buffer).WriteStringŒ>go.itab.*bytes.Buffer.io.Writer†runtime.convI2Eð2runtime.writebarrierifaceÄgo.string."%q"ºfmt.Fprintfà$type.*bytes.Bufferötype.io.WriterŽ>go.itab.*bytes.Buffer.io.Writer¢ runtime.typ2Itabœ>go.itab.*bytes.Buffer.io.Writer”2runtime.writebarrieriface²fmt.FprintØ$type.*bytes.Bufferîtype.io.Writer†>go.itab.*bytes.Buffer.io.Writerš runtime.typ2Itab`8"".autotmp_0482*type.*[1]interface {}"".autotmp_0481&type.[]interface {}"".autotmp_0480type.*uint8"".autotmp_0479"type.interface {}"".autotmp_0478*type.*[1]interface {}"".autotmp_0477&type.[]interface {}"".autotmp_0476type.*uint8"".autotmp_0475¯"type.interface {}"".autotmp_0473/&type.[]interface {}"".autotmp_0472ïtype.*uint8"".autotmp_0469"type.interface {}"".autotmp_0468$type.*bytes.Buffer"".autotmp_0467(type.[1]interface {}"".autotmp_0466$type.*bytes.Buffer"".autotmp_0465(type.[1]interface {}"".autotmp_0464type.bool"".autotmp_0462otype.string"".autotmp_0461ß$type.*bytes.Buffer"".autotmp_0460O(type.[1]interface {}"".autotmp_0459type.bool"".value¯"type.interface 
{}"".errmsgïtype.string"".valuetype.error"".valueÏtype.string"".value@"type.interface {} "".key type.string"".b$type.*bytes.Buffer"".f,type.*"".TextFormatter%¥Õ   hŽ%+Š*²> R0# £  >ß>TKDj/¶š495 +%5 Žš4¹O4Tgclocals·81e712ea8ab1acd9cb646b715ca7f2cdTgclocals·d0bfb266a2b14e29993ffd436ee6f54eÄ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.goþ&"".(*Logger).WriterÀ®eH‹ %H;awèëêHƒì8èH‹$H‹\$H‰\$ H‹\$@H‰$H‰D$H QjèYYH‹L$ HHH‰$H‰L$HH‰D$(H‰D$H‰T$0H‰T$èH‹\$ H‰\$HHƒÄ8à + 0runtime.morestack_noctxt6io.Pipe|:"".(*Logger).writerScanner·fŒruntime.newproc¨*"".writerFinalizer·f¶&type.*io.PipeWriterÖ2type.func(*io.PipeWriter)ˆ(runtime.SetFinalizer p"".writer/&type.*io.PipeWriter "".~r0&type.*io.PipeWriter"".loggertype.*"".Loggerp)Jo + <+[Tgclocals·2148c3737b2bb476685a1100a2e8343eTgclocals·61e2515c69061b8fed0e66ece719f936´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.goþ4"".(*Logger).writerScannerÀ¼eH‹ %H„$`ÿÿÿH;AwèëâHì H‹œ$0H‰\$HH‹1íH9脨H‹L$HH‰„$ÀH‰D$pH‰Œ$ÈH‰L$xHH‰$HÇD$HÇD$èH‹\$H‰œ$H‹\$ H‰œ$H‹\$(H‰œ$HH‰$èH‹|$H‰ùHƒÿ„1ÀèH‰L$@H‰ $Hƒ<$„ðH‹\$pH‰\$H‹\$xH‰\$èH‹\$@H‰$Hƒ<$„·Hƒ$HH‰\$èH‹D$@HÇ@H‰$Hƒ<$„yHƒ$8H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$èH‹\$@H‰\$8H‹\$8H‰$èH‹L$8¶\$€û„UHÇD$PHÇD$XHƒù„2Hi H$H‰ßH‰îH¥H¥H¥èH‹L$H‹D$ H‰Œ$°H‰„$¸H‰L$PH‰Œ$ÐH‰D$XH‰„$ØHœ$àHÇHÇCHœ$àHƒû„µHÇÂHÇÁH‰œ$ðH‰”$øH‰Œ$HH‰$Hœ$ÐH‰\$èH‹L$H‹D$H‹œ$ðH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èH‹œ$(H‰$H‹œ$ðH‰\$H‹œ$øH‰\$H‹œ$H‰\$èé˜þÿÿ‰éDÿÿÿ‰éÇþÿÿH‰ÊHÇD$`HÇD$hH‰L$0Hƒù„H‹A`H‹IhH‰Œ$˜H‰„$H‹-H9è…^H‰$H‰L$H‹-H‰l$H‹-H‰l$èH‹T$0¶\$ €û„%1É1ÀH‰L$`H‰D$hH‰„$ˆHƒùH‰Œ$€„ÝHœ$àHÇHÇCHœ$àHƒû„ÍHÇÂHÇÅH‰œ$ðH‰”$øH‰¬$H‰ $H‰D$èH‹L$H‹D$H‹œ$ðH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èH‹´$(H‰4$H5Hl$H‰ïH¥H¥H‹œ$ðH‰\$H‹œ$øH‰\$ H‹œ$H‰\$(èH‹œ$0H‰$èHÄ Éé,ÿÿÿHƒút H‹J`H‹BhéÌþÿÿ‰ëï‰é\þÿÿ‰%é{üÿÿ‰%é=üÿÿ‰%éüÿÿ‰éâûÿÿHH‰$HH‰\$HH‰\$èH‹D$é&ûÿÿ> 
+00runtime.morestack_noctxtj@go.itab.*io.PipeReader.io.ReaderÌtype.[]uint8‚"runtime.makesliceÞ$type.bufio.Scannerð"runtime.newobject¢Ä runtime.duffzeroü2runtime.writebarrieriface¼$bufio.ScanLines·fÐ.runtime.writebarrierptrê2runtime.writebarriersliceš*bufio.(*Scanner).Scanª2runtime.slicebytetostring¾ type.stringê runtime.convT2EÔ +2runtime.writebarrierifaceÄ $"".(*Logger).Printð  io.EOF¢  io.EOFº  io.EOFÎ runtime.ifaceeqþruntime.convI2Eè2runtime.writebarrierifaceŽ^go.string."Error while reading from Writer: %s"þ&"".(*Logger).Errorf ,io.(*PipeReader).CloseÞ&type.*io.PipeReaderôtype.io.ReaderŒ@go.itab.*io.PipeReader.io.Reader  runtime.typ2Itab À."".autotmp_0521"type.interface {}"".autotmp_0520*type.*[1]interface {}"".autotmp_0519&type.[]interface {}"".autotmp_0518Ÿtype.error"".autotmp_0517ÿ"type.interface {}"".autotmp_0515_&type.[]interface {}"".autotmp_0514ßtype.string"".autotmp_0513¿&type.*bufio.Scanner"".autotmp_0512&type.*bufio.Scanner"".autotmp_0510¿type.io.Reader"".autotmp_0509(type.[1]interface {}"".autotmp_0508Ÿtype.string"".autotmp_0507(type.[1]interface {}"".autotmp_0505/type.[]uint8"".autotmp_0504¯&type.*io.PipeReader "".~r0ÿtype.errorbufio.s·2ß&type.*bufio.Scanner "".~r0Ÿtype.stringbufio.r·2ßtype.io.Reader "".err¿type.error"".scannerÏ&type.*bufio.Scanner"".reader&type.*io.PipeReader"".loggertype.*"".Logger%Àö¿À„  +>&%Þ!­Ý_B€7Fwèm…˜€€Tgclocals·4a3831d274d2be9675c43f86862b9a60Tgclocals·a266581f5aa078217f9a8ca87caf12e6´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.goþ$"".writerFinalizer`ZeH‹ %H;awèëêHƒìH‹\$ H‰$èHƒÄà + 0runtime.morestack_noctxtH,io.(*PipeWriter).Close0"".writer&type.*io.PipeWriter0/0< +# Tgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·3280bececceccd33cb74587feedb1f9f´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.goþ"".initààeH‹ 
%H;awèëêHƒì¶€ût¶€ûuHƒÄÃè ÆèèèèèèèèèèèèèH‹$HH‰$H‰D$èèÆHƒÄÃ. + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·Œbufio.init–strings.init runtime.initªsyscall.init´log.init¾sync.initÈ$encoding/json.initÒtime.initÜos.initæio.initðfmt.initúbytes.init„ "".Newš "".std¶.runtime.writebarrierptrÀ"".init·1Ì"".initdone·   x@+, +7yTgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·3280bececceccd33cb74587feedb1f9f´/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go¸/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.goþ(type..hash.[8]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_0532type.int"".autotmp_0531type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[8]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$type..eq.[8]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_0536?type.string"".autotmp_0535type.string"".autotmp_0534_type.int"".autotmp_0533Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[8]string"".ptype.*[8]string&°´¯° ¯°ðð  
PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ4type..hash.[8]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0539type.int"".autotmp_0538type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[8]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ0type..eq.[8]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0543?"type.interface {}"".autotmp_0542"type.interface {}"".autotmp_0541_type.int"".autotmp_0540Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[8]interface {}"".p*type.*[8]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".(*Level).String€€eH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8¶+@ˆ,$èH‹L$H‹D$H‰L$@H‰D$HHƒÄ0à + 0runtime.morestack_noctxt¢$go.string."logrus"Ì"go.string."Level"ø$go.string."String" "runtime.panicwrapÆ"".Level.String0` "".~r0type.string""..thistype.*"".Level`¥_ÀÀ 1Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".Hook.Fire€òeH‹ %H;awèëêHƒì H‹Y H…Ût 
H|$(H9;uH‰#HÇD$@HÇD$HH‹\$8H‰\$H‹\$0H‰$H‹\$(H‹[ ÿÓH‹L$H‹D$H‰L$@H‰D$HHƒÄ à + 0runtime.morestack_noctxt¼ +P@ "".~r10type.error""..anon0 type.*"".Entry""..thistype."".Hook@^?€€ +^"Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".Hook.Levels „eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#HÇD$8HÇD$@HÇD$HH‹\$0H‰$H‹\$(H‹[(ÿÓH‹T$H‹L$H‹D$H‰T$8H‰L$@H‰D$HHƒÄ à + 0runtime.morestack_noctxtº +P@ "".~r0 type.[]"".Level""..thistype."".Hook@g? +]3Tgclocals·9edc1f6d8fc7336ae101b48cbf822a45Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".(*LevelHooks).AddàÜeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8H‹+H‰,$H‹\$@H‰\$H‹\$HH‰\$èHƒÄ0à + 0runtime.morestack_noctxt~$go.string."logrus"¨,go.string."LevelHooks"Ôgo.string."Add"ü"runtime.panicwrapÊ""".LevelHooks.Add0`"".hooktype."".Hook""..this&type.*"".LevelHooks`“_° +° +}3Tgclocals·284bdeb7a59f773ab3ee5877f5a03aa1Tgclocals·3280bececceccd33cb74587feedb1f9fþ*"".(*LevelHooks).FireÀ¦eH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$PHÇD$XH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8H‹+H‰,$¶\$@ˆ\$H‹\$HH‰\$èH‹L$H‹D$ H‰L$PH‰D$XHƒÄ0à + 0runtime.morestack_noctxt¢$go.string."logrus"Ì,go.string."LevelHooks"ø go.string."Fire" "runtime.panicwrapì$"".LevelHooks.FireP` "".~r20type.error"".entry type.*"".Entry"".leveltype."".Level""..this&type.*"".LevelHooks`¸_à à QTgclocals·dd0b304762533d7aaaca928b9df4b371Tgclocals·3280bececceccd33cb74587feedb1f9fþ&"".Formatter.Format€äeH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#HÇD$XHÇD$`HÇD$hHÇD$pHÇD$xH‹\$PH‰\$H‹\$HH‰$H‹\$@H‹[ ÿÓH‹t$H‹l$H‹T$ H‹L$(H‹D$0H‰t$XH‰l$`H‰T$hH‰L$pH‰D$xHƒÄ8à + 0runtime.morestack_noctxtò +€p "".~r2`type.error "".~r10type.[]uint8""..anon0 type.*"".Entry""..this"type."".Formatterp—oÀÀ +yGTgclocals·334cb8bc6294eb0b97ffb9b2c8e3805fTgclocals·3280bececceccd33cb74587feedb1f9fþ4type..hash.[1]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ 
H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0552type.int"".autotmp_0551type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[1]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ0type..eq.[1]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0556?"type.interface {}"".autotmp_0555"type.interface {}"".autotmp_0554_type.int"".autotmp_0553Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[1]interface {}"".p*type.*[1]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ6type..hash."".TextFormatterÀ®eH‹ %H;awèëêHƒì H‹\$(H‰$Hƒ<$„HÇD$H‹\$8H‰\$èH‹D$H‹\$(H‰$Hƒ<$tgHƒ$HÇD$H‰D$8H‰D$èH‹D$H‹\$(H‰$Hƒ<$t,Hƒ$HÇD$H‰D$8H‰D$èH‹\$H‰\$@HƒÄ É%ëˉ%ë‰%éWÿÿÿ + + 0runtime.morestack_noctxt„runtime.memhashèruntime.strhashÌruntime.memhash@@ "".autotmp_0559type.uintptr"".autotmp_0558type.uintptr "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p,type.*"".TextFormatter@ž?@'àà AŸTgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ2type..eq."".TextFormatter€èeH‹ %H;awèëêHƒìHH‹\$PH‰$Hƒ<$„:H‹\$XH‰\$Hƒ|$„HÇD$èH‹\$PH‰$Hƒ<$„êH‹\$XH‰\$Hƒ|$„ÈHÇD$è¶\$€ûu +ÆD$hHƒÄHÃH‹\$PHƒû„H‹sH‹KH‹\$XHƒûtyH‹SH‹CH9ÁubH‰t$8H‰4$H‰L$@H‰L$H‰T$(H‰T$H‰D$0H‰D$è¶\$ 
€ût,H‹l$P¶]L‹D$XA¶h@8ët +ÆD$hHƒÄHÃÆD$hHƒÄHÃÆD$hHƒÄHÉ냉éiÿÿÿ‰%é,ÿÿÿ‰%é +ÿÿÿ‰%éÜþÿÿ‰%éºþÿÿ + + 0runtime.morestack_noctxtœ$runtime.memequal32Œ$runtime.memequal32ê runtime.eqstring@ "".autotmp_0562?type.string"".autotmp_0561type.string "".~r30type.bool"".s type.uintptr"".q,type.*"".TextFormatter"".p,type.*"".TextFormatter@ƒ†  G€€ M³Tgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ4type..hash.[4]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0565type.int"".autotmp_0564type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[4]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ0type..eq.[4]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0569?"type.interface {}"".autotmp_0568"type.interface {}"".autotmp_0567_type.int"".autotmp_0566Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[4]interface {}"".p*type.*[4]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ4type..hash.[3]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ 
H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0572type.int"".autotmp_0571type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[3]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ0type..eq.[3]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0576?"type.interface {}"".autotmp_0575"type.interface {}"".autotmp_0574_type.int"".autotmp_0573Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[3]interface {}"".p*type.*[3]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.goþ$"".StdLogger.FatalàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[ ÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? 
+pp +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ&"".StdLogger.Fatalf€öeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(H‹\$@H‰$H‹\$8H‹[(ÿÓHƒÄ0à + 0runtime.morestack_noctxtè +p`""..anon1@&type.[]interface {}""..anon0 type.string""..this"type."".StdLogger``_€€ +t Tgclocals·a3682a93adc1ecf7106501ba903ce847Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".StdLogger.FatallnàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[0ÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? +pp +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ$"".StdLogger.PanicàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[8ÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? +pp +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ&"".StdLogger.Panicf€öeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(H‹\$@H‰$H‹\$8H‹[@ÿÓHƒÄ0à + 0runtime.morestack_noctxtè +p`""..anon1@&type.[]interface {}""..anon0 type.string""..this"type."".StdLogger``_€€ +t Tgclocals·a3682a93adc1ecf7106501ba903ce847Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".StdLogger.PaniclnàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[HÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? +pp +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ$"".StdLogger.PrintàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[PÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? 
+pp +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþ&"".StdLogger.Printf€öeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$HH‰\$H‹\$PH‰\$H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(H‹\$@H‰$H‹\$8H‹[XÿÓHƒÄ0à + 0runtime.morestack_noctxtè +p`""..anon1@&type.[]interface {}""..anon0 type.string""..this"type."".StdLogger``_€€ +t Tgclocals·a3682a93adc1ecf7106501ba903ce847Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".StdLogger.PrintlnàÎeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$HH‰\$H‹\$0H‰$H‹\$(H‹[`ÿÓHƒÄ à + 0runtime.morestack_noctxtÀ +P@""..anon0 &type.[]interface {}""..this"type."".StdLogger@L? +p p +`Tgclocals·32f137afc3f53351f1adc065fe3b9f83Tgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e475e3c2360b557d64285d9b9a4e506400 +þTgclocals·a7c27d2bfcc924fa8a92b6b29b7218b100þTgclocals·0719ac7e4405ec7094b2d696ead0af25((,.þTgclocals·363b18caf0020ca418fd378dbb75c855((þ"go.string.""0, "go.string.""þTgclocals·44e348188e22fef6300f71ab26e45197 °þTgclocals·896a3e2c9de7030cc72aa334f690557d  +þTgclocals·396579fca70851935df9d21183ca82fd  +¾þTgclocals·0723c8881b4d19cb48cb8887cfa073be  ÊÊþTgclocals·cbbe1bd73f3c341fc477038dafd9ade4pp4ªZ/ªZ +þTgclocals·fdf817463ca91d173b8e929c420286bd@@ + + + + + +þ,4go.itab.*os.File.io.Writerþ,>go.itab.*bytes.Buffer.io.ReaderþJgo.string."Failed to fire hook: %v\n"`RFailed to fire hook: %v + Jgo.string."Failed to fire hook: %v\n"þRgo.string."Failed to obtain reader, %v\n"`ZFailed to obtain reader, %v + Rgo.string."Failed to obtain reader, %v\n"þPgo.string."Failed to write to log, %v\n"`XFailed to write to log, %v + Pgo.string."Failed to write to log, %v\n"þTgclocals·22d60cc41efa02a0ac67663f051098e8°° +*À¼<, +¼< ,þTgclocals·65a30d49934626502b3d799f3cf8d99e`` +&&&&&&&&&&þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e0dd5664695c71438932a711825a98a4 
+þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·29f0050a5ee7c2b9348a75428171d7de þTgclocals·9ff42bf311af152488d11f0f78c8d5ce  + +þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0a3395567ab7eee3bb936aced49af517 ŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·0a3395567ab7eee3bb936aced49af517 ŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·e8d3240594e259421cd655d317fed5fe(( ŠŠŠþTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + 
+þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9f0d5ba6770c4a1ed4fa771547e96df1 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a7a3692b8e27e823add69ec4239ba55fþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·20671cc48303dfd2b9d73bba3d1850b7 þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·20671cc48303dfd2b9d73bba3d1850b7 þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·5d05a78f811f5c3f62710534cdce0004þTgclocals·0115f8d53b75c1696444f08ad03251d9þTgclocals·7c868751a5d2fdd881613692c78d6476 þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·20671cc48303dfd2b9d73bba3d1850b7 þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·66ae2244d17a3b89653cba445a520071 +òþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 
+"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f271231f400e778e0f59be25f7a26a56 +"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þ go.string."time"0*time go.string."time"þ.go.string."fields.time"@8 fields.time .go.string."fields.time"þgo.string."msg"0(msg go.string."msg"þ,go.string."fields.msg"@6 +fields.msg ,go.string."fields.msg"þ"go.string."level"0,level "go.string."level"þ0go.string."fields.level"@: fields.level 0go.string."fields.level"þTgclocals·cfe802ef097eb87dc1d2f379757036b4(( /þTgclocals·15395a9df917b4c9aa74d5c6c7e1ebf4((þTgclocals·5347b08d42ef15c0183233bde05091ab00  +þTgclocals·a02efc190d1c7709e4c72531a85b968d00....þTgclocals·7e4aab61b173caafc98b406c57151fa1 þTgclocals·7ce35767da505d40dfb8f85871f02969  +&&þJgo.string."2006-01-02T15:04:05Z07:00"`T2006-01-02T15:04:05Z07:00 Jgo.string."2006-01-02T15:04:05Z07:00"þ`go.string."Failed to marshal fields to JSON, %v"pj$Failed to marshal fields to JSON, %v `go.string."Failed to marshal fields to JSON, %v"þTgclocals·859165c97b106654e8e33715962a8293ààTªZ<ªZªZ€ªZ¼ªZ<ªZ€¼<ÀÀþTgclocals·91781b467bdd49442cfecbf49067c104€€ 
+ + + + + + + + + + + + + +þ,Lgo.itab.*"".TextFormatter."".FormatterþTgclocals·87b2493d48b5216b7adc3dac672105cb@@ (* +þTgclocals·e9c510091732f30fce387c9f1e977134@@þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·0a89f44bdb06c71b1e3fde611d9babf488 ÊÊÊÊÊþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·ab01a2d55089ff50c402006df1039c3988 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·fc96ae191c2547955912928601e8595988 ŠŠŠŠŠþTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + 
+þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·0442504e096b61648fffc20fe86cec6688"(þTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þ"go.string."debug"0,debug "go.string."debug"þ go.string."info"0*info go.string."info"þ&go.string."warning"00warning &go.string."warning"þ"go.string."error"0,error "go.string."error"þ"go.string."fatal"0,fatal "go.string."fatal"þ"go.string."panic"0,panic "go.string."panic"þ&go.string."unknown"00unknown &go.string."unknown"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a73fd2a0c6f832642aa9216fd9c5e6beþ go.string."warn"0*warn go.string."warn"þPgo.string."not a valid logrus Level: %q"`Znot a valid logrus Level: %q Pgo.string."not a valid logrus Level: %q"þTgclocals·626b2db390378ab5b89c88b48426687f00 òòþTgclocals·db0f6b31ff49b3f025910ec03f9742fa00 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a7a3692b8e27e823add69ec4239ba55fþTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a7a3692b8e27e823add69ec4239ba55fþTgclocals·cfaa0e57cb7ad41983cddbe4e85a52ca°° +>¨j¨j ¨j * +þTgclocals·71f03f9031d1479950f850ff028ecf79`` + + + + + + + + + + +þ,>go.itab.*bytes.Buffer.io.WriterþTgo.string."\x1b[%dm%s\x1b[0m[%04d] %-44s "`R[%dm%s[%04d] %-44s  Tgo.string."\x1b[%dm%s\x1b[0m[%04d] 
%-44s "þPgo.string."\x1b[%dm%s\x1b[0m[%s] %-44s "PN[%dm%s[%s] %-44s  Pgo.string."\x1b[%dm%s\x1b[0m[%s] %-44s "þBgo.string." \x1b[%dm%s\x1b[0m=%v"@@ [%dm%s=%v Bgo.string." \x1b[%dm%s\x1b[0m=%v"þTgclocals·3d6acc133f31046c5a57fcb873eb249f°° R €(€üÿ€üÿ ˆ(ˆüÿˆüÿ€âêÀÿ*ÀÿÀÿþTgclocals·40341f41d3f930f66258415e27eda61dpp ªªªªªªªªªªªªþTgclocals·8d600a433c6aaa81a4fe446d95c5546b þTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8f þgo.string."%q"0&%q go.string."%q"þTgclocals·d0bfb266a2b14e29993ffd436ee6f54eÐÐ .ð  +òò°°° +ððþTgclocals·81e712ea8ab1acd9cb646b715ca7f2cdpp ************þTgclocals·61e2515c69061b8fed0e66ece719f936 þTgclocals·2148c3737b2bb476685a1100a2e8343e þ,@go.itab.*io.PipeReader.io.Readerþ^go.string."Error while reading from Writer: %s"ph#Error while reading from Writer: %s ^go.string."Error while reading from Writer: %s"þTgclocals·a266581f5aa078217f9a8ca87caf12e6ÐÐ <€ € € òòððþTgclocals·4a3831d274d2be9675c43f86862b9a60pp  + + + + + + + + + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·3280bececceccd33cb74587feedb1f9fþ* "".stdtype.*"".Loggerþ* 
"".baseTimestamp0type.time.Timeþ,"".isTerminaltype.boolþ,"".initdone·type.uint8þ"".NewEntry·f"".NewEntryþ$runtime.makemap·fruntime.makemapþ(runtime.newobject·f"runtime.newobjectþ4runtime.writebarrierptr·f.runtime.writebarrierptrþ,runtime.throwreturn·f&runtime.throwreturnþ*"".(*Entry).Reader·f$"".(*Entry).Readerþ8runtime.writebarrierslice·f2runtime.writebarriersliceþ*"".(*Entry).String·f$"".(*Entry).Stringþ*runtime.panicslice·f$runtime.panicsliceþ8runtime.slicebytetostring·f2runtime.slicebytetostringþ0"".(*Entry).WithField·f*"".(*Entry).WithFieldþ*runtime.mapassign1·f$runtime.mapassign1þ2"".(*Entry).WithFields·f,"".(*Entry).WithFieldsþ,runtime.mapiterinit·f&runtime.mapiterinitþ,runtime.mapiternext·f&runtime.mapiternextþ$"".(*Entry).log·f"".(*Entry).logþtime.Now·ftime.Nowþ6runtime.writebarrierfat3·f0runtime.writebarrierfat3þ:runtime.writebarrierstring·f4runtime.writebarrierstringþ*"".LevelHooks.Fire·f$"".LevelHooks.Fireþ*sync.(*Mutex).Lock·f$sync.(*Mutex).Lockþ&runtime.typ2Itab·f runtime.typ2Itabþ$runtime.convI2E·fruntime.convI2Eþ8runtime.writebarrieriface·f2runtime.writebarrierifaceþfmt.Fprintf·ffmt.Fprintfþ.sync.(*Mutex).Unlock·f(sync.(*Mutex).Unlockþ(runtime.deferproc·f"runtime.deferprocþ,runtime.deferreturn·f&runtime.deferreturnþio.Copy·fio.Copyþ$runtime.gopanic·fruntime.gopanicþ("".(*Entry).Debug·f""".(*Entry).Debugþfmt.Sprint·ffmt.Sprintþ("".(*Entry).Print·f""".(*Entry).Printþ&"".(*Entry).Info·f "".(*Entry).Infoþ&"".(*Entry).Warn·f 
"".(*Entry).Warnþ,"".(*Entry).Warning·f&"".(*Entry).Warningþ("".(*Entry).Error·f""".(*Entry).Errorþ("".(*Entry).Fatal·f""".(*Entry).Fatalþos.Exit·fos.Exitþ("".(*Entry).Panic·f""".(*Entry).Panicþ$runtime.convT2E·fruntime.convT2Eþ*"".(*Entry).Debugf·f$"".(*Entry).Debugfþfmt.Sprintf·ffmt.Sprintfþ("".(*Entry).Infof·f""".(*Entry).Infofþ*"".(*Entry).Printf·f$"".(*Entry).Printfþ("".(*Entry).Warnf·f""".(*Entry).Warnfþ."".(*Entry).Warningf·f("".(*Entry).Warningfþ*"".(*Entry).Errorf·f$"".(*Entry).Errorfþ*"".(*Entry).Fatalf·f$"".(*Entry).Fatalfþ*"".(*Entry).Panicf·f$"".(*Entry).Panicfþ,"".(*Entry).Debugln·f&"".(*Entry).Debuglnþ0"".(*Entry).sprintlnn·f*"".(*Entry).sprintlnnþ*"".(*Entry).Infoln·f$"".(*Entry).Infolnþ,"".(*Entry).Println·f&"".(*Entry).Printlnþ*"".(*Entry).Warnln·f$"".(*Entry).Warnlnþ0"".(*Entry).Warningln·f*"".(*Entry).Warninglnþ,"".(*Entry).Errorln·f&"".(*Entry).Errorlnþ,"".(*Entry).Fatalln·f&"".(*Entry).Fatallnþ,"".(*Entry).Panicln·f&"".(*Entry).Paniclnþfmt.Sprintln·ffmt.Sprintlnþ("".StandardLogger·f""".StandardLoggerþ"".SetOutput·f"".SetOutputþ$"".SetFormatter·f"".SetFormatterþ"".SetLevel·f"".SetLevelþ"".GetLevel·f"".GetLevelþ"".AddHook·f"".AddHookþ("".LevelHooks.Add·f""".LevelHooks.Addþ"".WithField·f"".WithFieldþ2"".(*Logger).WithField·f,"".(*Logger).WithFieldþ 
"".WithFields·f"".WithFieldsþ4"".(*Logger).WithFields·f."".(*Logger).WithFieldsþ"".Debug·f"".Debugþ*"".(*Logger).Debug·f$"".(*Logger).Debugþ"".Print·f"".Printþ*"".(*Logger).Print·f$"".(*Logger).Printþ"".Info·f"".Infoþ("".(*Logger).Info·f""".(*Logger).Infoþ"".Warn·f"".Warnþ("".(*Logger).Warn·f""".(*Logger).Warnþ"".Warning·f"".Warningþ."".(*Logger).Warning·f("".(*Logger).Warningþ"".Error·f"".Errorþ*"".(*Logger).Error·f$"".(*Logger).Errorþ"".Panic·f"".Panicþ*"".(*Logger).Panic·f$"".(*Logger).Panicþ"".Fatal·f"".Fatalþ*"".(*Logger).Fatal·f$"".(*Logger).Fatalþ"".Debugf·f"".Debugfþ,"".(*Logger).Debugf·f&"".(*Logger).Debugfþ"".Printf·f"".Printfþ,"".(*Logger).Printf·f&"".(*Logger).Printfþ"".Infof·f"".Infofþ*"".(*Logger).Infof·f$"".(*Logger).Infofþ"".Warnf·f"".Warnfþ*"".(*Logger).Warnf·f$"".(*Logger).Warnfþ"".Warningf·f"".Warningfþ0"".(*Logger).Warningf·f*"".(*Logger).Warningfþ"".Errorf·f"".Errorfþ,"".(*Logger).Errorf·f&"".(*Logger).Errorfþ"".Panicf·f"".Panicfþ,"".(*Logger).Panicf·f&"".(*Logger).Panicfþ"".Fatalf·f"".Fatalfþ,"".(*Logger).Fatalf·f&"".(*Logger).Fatalfþ"".Debugln·f"".Debuglnþ."".(*Logger).Debugln·f("".(*Logger).Debuglnþ"".Println·f"".Printlnþ."".(*Logger).Println·f("".(*Logger).Printlnþ"".Infoln·f"".Infolnþ,"".(*Logger).Infoln·f&"".(*Logger).Infolnþ"".Warnln·f"".Warnlnþ,"".(*Logger).Warnln·f&"".(*Logger).Warnlnþ"".Warningln·f"".Warninglnþ2"".(*Logger).Warningln·f,"".(*Logger).Warninglnþ"".Errorln·f"".Errorlnþ."".(*Logger).Errorln·f("".(*Logger).Errorlnþ"".Panicln·f"".Paniclnþ."".(*Logger).Panicln·f("".(*Logger).Paniclnþ"".Fatalln·f"".Fatallnþ."".(*Logger).Fatalln·f("".(*Logger).Fatallnþ0"".prefixFieldClashes·f*"".prefixFieldClashesþ:runtime.mapaccess2_faststr·f4runtime.mapaccess2_faststrþ:runtime.mapaccess1_faststr·f4runtime.mapaccess1_faststrþ*runtime.mapaccess1·f$runtime.mapaccess1þ(runtime.growslice·f"runtime.growsliceþ:"".(*JSONFormatter).Format·f4"".(*JSONFormatter).Formatþ*runtime.efacethash·f$runtime.efacethashþ*runtime.assertE2I2·f$runtime.assertE2I2þ&tim
e.Time.Format·f time.Time.Formatþ$"".Level.String·f"".Level.Stringþ0encoding/json.Marshal·f*encoding/json.Marshalþfmt.Errorf·ffmt.Errorfþ"".New·f "".Newþ "".ParseLevel·f"".ParseLevelþ(runtime.cmpstring·f"runtime.cmpstringþ&runtime.eqstring·f runtime.eqstringþ "".IsTerminal·f"".IsTerminalþ&syscall.Syscall6·f syscall.Syscall6þ"".init·1·f"".init·1þ"".miniTS·f"".miniTSþtime.Since·ftime.Sinceþ:"".(*TextFormatter).Format·f4"".(*TextFormatter).Formatþ(runtime.makeslice·f"runtime.makesliceþsort.Strings·fsort.StringsþF"".(*TextFormatter).printColored·f@"".(*TextFormatter).printColoredþJ"".(*TextFormatter).appendKeyValue·fD"".(*TextFormatter).appendKeyValueþ8bytes.(*Buffer).WriteByte·f2bytes.(*Buffer).WriteByteþ$strings.ToUpper·fstrings.ToUpperþ$"".needsQuoting·f"".needsQuotingþ,runtime.stringiter2·f&runtime.stringiter2þ &type..alg.[8]string0bruntime.gcbits.0x48484848484848480000000000000000P*go.string."[8]string"p.go.weak.type.*[8]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[8]string/[8]stringtype.[8]stringþ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ*logrus.Fields 4go.string."*logrus.Fields"þtype.*"".Fields  £XrŸ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*logrus.Fields"p0go.weak.type.**"".Fields€"runtime.zerovaluetype."".Fieldsþ2go.string."logrus.Fields"@< logrus.Fields 2go.string."logrus.Fields"þ$go.string."Fields"0.Fields $go.string."Fields"þtype."".Fields°°²ƒ€i5 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."logrus.Fields"ptype.*"".Fields€"runtime.zerovaluetype.string "type.interface {}°Ftype.map.bucket[string]interface {}À@type.map.hdr[string]interface {}`àtype."".Fieldsà$go.string."Fields"ð"go.importpath."".€°type."".Fieldsþ2go.string."*logrus.Level"@< *logrus.Level 2go.string."*logrus.Level"þ$go.string."logrus"0.logrus $go.string."logrus"þ"go.string."Level"0,Level "go.string."Level"þ$go.string."String"0.String $go.string."String"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þLgo.string."func(*logrus.Level) string"`Vfunc(*logrus.Level) string Lgo.string."func(*logrus.Level) string"þ6type.func(*"".Level) string  ©ëx3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(*logrus.Level) string"pHgo.weak.type.*func(*"".Level) string€"runtime.zerovalue €6type.func(*"".Level) stringÐ6type.func(*"".Level) string€type.*"".Leveltype.stringþ2go.string."func() string"@< func() string 2go.string."func() string"þ$type.func() string¢mË3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."func() string"p6go.weak.type.*func() string€"runtime.zerovalue €$type.func() stringЀ$type.func() string€type.stringþtype.*"".LevelÐІlÙU6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."*logrus.Level"p.go.weak.type.**"".Level€"runtime.zerovaluetype."".Level` type.*"".LevelÀðtype.*"".Levelð$go.string."String"$type.func() string 6type.func(*"".Level) string°$"".(*Level).StringÀ$"".(*Level).Stringþ^runtime.gcbits.0x000000000000000000000000000000 þ0go.string."logrus.Level"@: logrus.Level 0go.string."logrus.Level"þJgo.string."func(logrus.Level) string"`Tfunc(logrus.Level) string Jgo.string."func(logrus.Level) string"þ4type.func("".Level) string  NÚâ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."func(logrus.Level) string"pFgo.weak.type.*func("".Level) 
string€"runtime.zerovalue €4type.func("".Level) stringÐ4type.func("".Level) string€type."".Leveltype.stringþtype."".LevelÀÀI ˆ @ runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P0go.string."logrus.Level"ptype.*"".Level€"runtime.zerovalue`type."".Level"go.string."Level" "go.importpath."".°àtype."".Levelà$go.string."String"€$type.func() string4type.func("".Level) string $"".(*Level).String°"".Level.StringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·32f137afc3f53351f1adc065fe3b9f83 ++þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9edc1f6d8fc7336ae101b48cbf822a45 + þ2go.string."*logrus.Entry"@< *logrus.Entry 2go.string."*logrus.Entry"þ`go.string."func(*logrus.Entry, ...interface {})"pj$func(*logrus.Entry, ...interface {}) `go.string."func(*logrus.Entry, ...interface {})"þJtype.func(*"".Entry, ...interface {})  î¡]ì3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*logrus.Entry, ...interface {})"p\go.weak.type.*func(*"".Entry, ...interface {})€"runtime.zerovalue €Jtype.func(*"".Entry, ...interface {})РJtype.func(*"".Entry, ...interface {})€type.*"".Entry&type.[]interface {}þpgo.string."func(*logrus.Entry, string, ...interface {})"€z,func(*logrus.Entry, string, ...interface {}) pgo.string."func(*logrus.Entry, string, ...interface {})"þZtype.func(*"".Entry, string, ...interface {})°°¾ê{¬3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."func(*logrus.Entry, string, ...interface {})"plgo.weak.type.*func(*"".Entry, string, ...interface {})€"runtime.zerovalue €Ztype.func(*"".Entry, string, ...interface {})аZtype.func(*"".Entry, string, ...interface {})€type.*"".Entrytype.string &type.[]interface {}þlgo.string."func(*logrus.Entry) (*bytes.Buffer, error)"€v*func(*logrus.Entry) (*bytes.Buffer, error) lgo.string."func(*logrus.Entry) (*bytes.Buffer, error)"þVtype.func(*"".Entry) (*bytes.Buffer, error)°°új]´3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(*logrus.Entry) (*bytes.Buffer, error)"phgo.weak.type.*func(*"".Entry) (*bytes.Buffer, error)€"runtime.zerovalue €Vtype.func(*"".Entry) (*bytes.Buffer, error)ÐVtype.func(*"".Entry) (*bytes.Buffer, error)€type.*"".Entry$type.*bytes.Buffer type.errorþ^go.string."func(*logrus.Entry) (string, error)"ph#func(*logrus.Entry) (string, error) ^go.string."func(*logrus.Entry) (string, error)"þHtype.func(*"".Entry) (string, error)°°ÇÀ}3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*logrus.Entry) (string, error)"pZgo.weak.type.*func(*"".Entry) (string, error)€"runtime.zerovalue €Htype.func(*"".Entry) (string, error)ÐHtype.func(*"".Entry) (string, error)€type.*"".Entrytype.string type.errorþ†go.string."func(*logrus.Entry, string, interface {}) *logrus.Entry"7func(*logrus.Entry, string, interface {}) *logrus.Entry †go.string."func(*logrus.Entry, string, interface {}) *logrus.Entry"þhtype.func(*"".Entry, string, interface {}) *"".EntryÀÀ·4Œ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P†go.string."func(*logrus.Entry, string, interface {}) *logrus.Entry"pzgo.weak.type.*func(*"".Entry, string, interface {}) *"".Entry€"runtime.zerovalue €htype.func(*"".Entry, string, interface {}) *"".Entryаhtype.func(*"".Entry, string, interface {}) *"".Entry€type.*"".Entrytype.string "type.interface {}°type.*"".Entryþxgo.string."func(*logrus.Entry, logrus.Fields) *logrus.Entry"‚0func(*logrus.Entry, logrus.Fields) *logrus.Entry xgo.string."func(*logrus.Entry, logrus.Fields) *logrus.Entry"þRtype.func(*"".Entry, "".Fields) *"".Entry°°O\ò¾3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*logrus.Entry, logrus.Fields) *logrus.Entry"pdgo.weak.type.*func(*"".Entry, "".Fields) *"".Entry€"runtime.zerovalue €Rtype.func(*"".Entry, "".Fields) *"".EntryРRtype.func(*"".Entry, "".Fields) *"".Entry€type.*"".Entrytype."".Fields 
type.*"".Entryþjgo.string."func(*logrus.Entry, logrus.Level, string)"€t)func(*logrus.Entry, logrus.Level, string) jgo.string."func(*logrus.Entry, logrus.Level, string)"þLtype.func(*"".Entry, "".Level, string)°°²Òßî3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*logrus.Entry, logrus.Level, string)"p^go.weak.type.*func(*"".Entry, "".Level, string)€"runtime.zerovalue €Ltype.func(*"".Entry, "".Level, string)аLtype.func(*"".Entry, "".Level, string)€type.*"".Entrytype."".Level type.stringþngo.string."func(*logrus.Entry, ...interface {}) string"€x+func(*logrus.Entry, ...interface {}) string ngo.string."func(*logrus.Entry, ...interface {}) string"þXtype.func(*"".Entry, ...interface {}) string°°˜¶ŒŒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(*logrus.Entry, ...interface {}) string"pjgo.weak.type.*func(*"".Entry, ...interface {}) string€"runtime.zerovalue €Xtype.func(*"".Entry, ...interface {}) stringРXtype.func(*"".Entry, ...interface {}) string€type.*"".Entry&type.[]interface {} type.stringþ"go.string."Debug"0,Debug "go.string."Debug"þBgo.string."func(...interface {})"PLfunc(...interface {}) Bgo.string."func(...interface {})"þ4type.func(...interface {})Ë3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func(...interface {})"pFgo.weak.type.*func(...interface {})€"runtime.zerovalue €4type.func(...interface {})Ð4type.func(...interface {})€&type.[]interface {}þ$go.string."Debugf"0.Debugf $go.string."Debugf"þRgo.string."func(string, ...interface {})"`\func(string, ...interface {}) Rgo.string."func(string, ...interface {})"þDtype.func(string, ...interface {})  õµ@µ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."func(string, ...interface {})"pVgo.weak.type.*func(string, ...interface {})€"runtime.zerovalue €Dtype.func(string, ...interface {})РDtype.func(string, ...interface {})€type.string&type.[]interface 
{}þ&go.string."Debugln"00Debugln &go.string."Debugln"þ"go.string."Error"0,Error "go.string."Error"þ$go.string."Errorf"0.Errorf $go.string."Errorf"þ&go.string."Errorln"00Errorln &go.string."Errorln"þ"go.string."Fatal"0,Fatal "go.string."Fatal"þ$go.string."Fatalf"0.Fatalf $go.string."Fatalf"þ&go.string."Fatalln"00Fatalln &go.string."Fatalln"þ go.string."Info"0*Info go.string."Info"þ"go.string."Infof"0,Infof "go.string."Infof"þ$go.string."Infoln"0.Infoln $go.string."Infoln"þ"go.string."Panic"0,Panic "go.string."Panic"þ$go.string."Panicf"0.Panicf $go.string."Panicf"þ&go.string."Panicln"00Panicln &go.string."Panicln"þ"go.string."Print"0,Print "go.string."Print"þ$go.string."Printf"0.Printf $go.string."Printf"þ&go.string."Println"00Println &go.string."Println"þ$go.string."Reader"0.Reader $go.string."Reader"þRgo.string."func() (*bytes.Buffer, error)"`\func() (*bytes.Buffer, error) Rgo.string."func() (*bytes.Buffer, error)"þDtype.func() (*bytes.Buffer, error)  ¿(3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."func() (*bytes.Buffer, error)"pVgo.weak.type.*func() (*bytes.Buffer, error)€"runtime.zerovalue €Dtype.func() (*bytes.Buffer, error)ЀDtype.func() (*bytes.Buffer, error)€$type.*bytes.Buffertype.errorþDgo.string."func() (string, error)"PNfunc() (string, error) Dgo.string."func() (string, error)"þ6type.func() (string, error)  ¯±u¦3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."func() (string, error)"pHgo.weak.type.*func() (string, error)€"runtime.zerovalue €6type.func() (string, error)Ѐ6type.func() (string, error)€type.stringtype.errorþ go.string."Warn"0*Warn go.string."Warn"þ"go.string."Warnf"0,Warnf "go.string."Warnf"þ&go.string."Warning"00Warning &go.string."Warning"þ(go.string."Warningf"@2Warningf (go.string."Warningf"þ*go.string."Warningln"@4 Warningln *go.string."Warningln"þ$go.string."Warnln"0.Warnln $go.string."Warnln"þ*go.string."WithField"@4 WithField 
*go.string."WithField"þhgo.string."func(string, interface {}) *logrus.Entry"€r(func(string, interface {}) *logrus.Entry hgo.string."func(string, interface {}) *logrus.Entry"þRtype.func(string, interface {}) *"".Entry°°¼Ã€%3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(string, interface {}) *logrus.Entry"pdgo.weak.type.*func(string, interface {}) *"".Entry€"runtime.zerovalue €Rtype.func(string, interface {}) *"".EntryРRtype.func(string, interface {}) *"".Entry€type.string"type.interface {} type.*"".Entryþ,go.string."WithFields"@6 +WithFields ,go.string."WithFields"þZgo.string."func(logrus.Fields) *logrus.Entry"pd!func(logrus.Fields) *logrus.Entry Zgo.string."func(logrus.Fields) *logrus.Entry"þ[]logrus.Level 4go.string."[]logrus.Level"þtype.[]"".Level  Ec¡Þ   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]logrus.Level"p0go.weak.type.*[]"".Level€"runtime.zerovaluetype."".LevelþJgo.typelink.[]logrus.Level/[]"".Leveltype.[]"".LevelþBgo.string."func() []logrus.Level"PLfunc() []logrus.Level Bgo.string."func() []logrus.Level"þ,type.func() []"".Levell%‰¦3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func() []logrus.Level"p>go.weak.type.*func() []"".Level€"runtime.zerovalue €,type.func() []"".LevelЀ,type.func() []"".Level€type.[]"".Levelþ0go.string."*logrus.Hook"@: *logrus.Hook 0go.string."*logrus.Hook"þtype.*"".Hook   ©Ð>6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."*logrus.Hook"p,go.weak.type.**"".Hook€"runtime.zerovaluetype."".Hookþbruntime.gcbits.0x8c000000000000000000000000000000 Œþ.go.string."logrus.Hook"@8 logrus.Hook .go.string."logrus.Hook"þ go.string."Fire"0*Fire go.string."Fire"þ$go.string."Levels"0.Levels $go.string."Levels"þ go.string."Hook"0*Hook go.string."Hook"þtype."".HookððÛ¡T à 
runtime.algarray0bruntime.gcbits.0x8c000000000000000000000000000000P.go.string."logrus.Hook"ptype.*"".Hook€"runtime.zerovalueÀtype."".HookÀ go.string."Fire"à4type.func(*"".Entry) errorð$go.string."Levels",type.func() []"".Level` type."".Hook  go.string."Hook"°"go.importpath."".Àðtype."".Hookþ2go.string."[]logrus.Hook"@< []logrus.Hook 2go.string."[]logrus.Hook"þtype.[]"".Hook  ¢Ï†$   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P2go.string."[]logrus.Hook"p.go.weak.type.*[]"".Hook€"runtime.zerovaluetype."".HookþFgo.typelink.[]logrus.Hook/[]"".Hooktype.[]"".Hookþ6go.string."[8]logrus.Level"@@[8]logrus.Level 6go.string."[8]logrus.Level"þ type.[8]"".LevelÀÀ¦-æ.‘   runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P6go.string."[8]logrus.Level"p2go.weak.type.*[8]"".Level€"runtime.zerovaluetype."".Level type.[]"".LevelþNgo.typelink.[8]logrus.Level/[8]"".Level type.[8]"".Levelþ6go.string."[][]logrus.Hook"@@[][]logrus.Hook 6go.string."[][]logrus.Hook"þ type.[][]"".Hook  ×€%Ž   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P6go.string."[][]logrus.Hook"p2go.weak.type.*[][]"".Hook€"runtime.zerovaluetype.[]"".HookþNgo.typelink.[][]logrus.Hook/[][]"".Hook type.[][]"".Hookþbruntime.gcbits.0x48844448844448844448844400000000 H„DH„DH„DH„Dþ8go.string."[8][]logrus.Hook"PB[8][]logrus.Hook 8go.string."[8][]logrus.Hook"þ"type.[8][]"".HookÀÀÀ$ ø à runtime.algarray0bruntime.gcbits.0x48844448844448844448844400000000P8go.string."[8][]logrus.Hook"p4go.weak.type.*[8][]"".Hook€"runtime.zerovaluetype.[]"".Hook  type.[][]"".HookþRgo.typelink.[8][]logrus.Hook/[8][]"".Hook"type.[8][]"".Hookþdgo.string."*map.bucket[logrus.Level][]logrus.Hook"pn&*map.bucket[logrus.Level][]logrus.Hook dgo.string."*map.bucket[logrus.Level][]logrus.Hook"þFtype.*map.bucket["".Level][]"".Hook  ÛÒ¾%6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."*map.bucket[logrus.Level][]logrus.Hook"pXgo.weak.type.**map.bucket["".Level][]"".Hook€"runtime.zerovalueDtype.map.bucket["".Level][]"".Hookþ,Ltype..gc.map.bucket["".Level][]"".Hook8þTtype..gcprog.map.bucket["".Level][]"".HookeY–eY–%þbgo.string."map.bucket[logrus.Level][]logrus.Hook"pl%map.bucket[logrus.Level][]logrus.Hook bgo.string."map.bucket[logrus.Level][]logrus.Hook"þDtype.map.bucket["".Level][]"".Hook°°ØÊŸ"YÐ à runtime.algarray0Ltype..gc.map.bucket["".Level][]"".Hook@Ttype..gcprog.map.bucket["".Level][]"".HookPbgo.string."map.bucket[logrus.Level][]logrus.Hook"pVgo.weak.type.*map.bucket["".Level][]"".Hook€"runtime.zerovalueÀDtype.map.bucket["".Level][]"".HookÀ go.string."keys"à type.[8]"".Level$go.string."values"°"type.[8][]"".Hookà(go.string."overflow"€Ftype.*map.bucket["".Level][]"".Hookþ\go.string."map.hdr[logrus.Level][]logrus.Hook"pf"map.hdr[logrus.Level][]logrus.Hook \go.string."map.hdr[logrus.Level][]logrus.Hook"þ>type.map.hdr["".Level][]"".Hookàà0’-/¡  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000P\go.string."map.hdr[logrus.Level][]logrus.Hook"pPgo.weak.type.*map.hdr["".Level][]"".Hook€"runtime.zerovalueÀ>type.map.hdr["".Level][]"".HookÀ&go.string."buckets"àFtype.*map.bucket["".Level][]"".Hook,go.string."oldbuckets"°Ftype.*map.bucket["".Level][]"".Hookþtype.map.hdr["".Level][]"".Hook`à$type."".LevelHooksà,go.string."LevelHooks"ð"go.importpath."".€°$type."".LevelHooks°go.string."Add"Ð$type.func("".Hook)àBtype.func("".LevelHooks, "".Hook)ð""".LevelHooks.Add€""".LevelHooks.Add go.string."Fire"°Htype.func("".Level, *"".Entry) errorÀftype.func("".LevelHooks, "".Level, *"".Entry) errorÐ$"".LevelHooks.Fireà$"".LevelHooks.FireþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·334cb8bc6294eb0b97ffb9b2c8e3805f+þ&go.string."[]uint8"00[]uint8 &go.string."[]uint8"þtype.[]uint8  ß~.8   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P&go.string."[]uint8"p*go.weak.type.*[]uint8€"runtime.zerovaluetype.uint8þ6go.typelink.[]uint8/[]uint8type.[]uint8þ`go.string."func(*logrus.Entry) ([]uint8, error)"pj$func(*logrus.Entry) ([]uint8, error) `go.string."func(*logrus.Entry) ([]uint8, error)"þJtype.func(*"".Entry) ([]uint8, error)°°O"%­3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*logrus.Entry) ([]uint8, error)"p\go.weak.type.*func(*"".Entry) ([]uint8, error)€"runtime.zerovalue €Jtype.func(*"".Entry) ([]uint8, error)ÐJtype.func(*"".Entry) ([]uint8, error)€type.*"".Entrytype.[]uint8 type.errorþ:go.string."*logrus.Formatter"PD*logrus.Formatter :go.string."*logrus.Formatter"þ$type.*"".Formatter  :0x6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."*logrus.Formatter"p6go.weak.type.**"".Formatter€"runtime.zerovalue"type."".Formatterþ8go.string."logrus.Formatter"PBlogrus.Formatter 8go.string."logrus.Formatter"þ$go.string."Format"0.Format $go.string."Format"þ*go.string."Formatter"@4 Formatter *go.string."Formatter"þ"type."".FormatterÀÀÖ¾òè à runtime.algarray0bruntime.gcbits.0x8c000000000000000000000000000000P8go.string."logrus.Formatter"p$type.*"".Formatter€"runtime.zerovalueÀ"type."".FormatterÀ$go.string."Format"àJtype.func(*"".Entry) ([]uint8, error)`ð"type."".Formatterð*go.string."Formatter"€"go.importpath."".À"type."".Formatterþbruntime.gcbits.0x8cc848c4888c44000000000000000000 ŒÈHĈŒDþ2go.string."logrus.Logger"@< logrus.Logger 2go.string."logrus.Logger"þgo.string."Out"0(Out go.string."Out"þ"go.string."Hooks"0,Hooks "go.string."Hooks"þgo.string."mu"0&mu go.string."mu"þ$go.string."Logger"0.Logger $go.string."Logger"þtype."".Logger  8Dãk(,* à 
runtime.algarray0bruntime.gcbits.0x8cc848c4888c44000000000000000000P2go.string."logrus.Logger"ptype.*"".Logger€"runtime.zerovalueÀtype."".LoggerÀgo.string."Out"àtype.io.Writer"go.string."Hooks"°$type."".LevelHooksà*go.string."Formatter"€"type."".Formatter°"go.string."Level"Ðtype."".Level€go.string."mu""go.importpath."". type.sync.Mutex`Ðtype."".LoggerÐ$go.string."Logger"à"go.importpath."".ð type."".Loggerþ4go.string."*logrus.Logger"@>*logrus.Logger 4go.string."*logrus.Logger"þbgo.string."func(*logrus.Logger, ...interface {})"pl%func(*logrus.Logger, ...interface {}) bgo.string."func(*logrus.Logger, ...interface {})"þLtype.func(*"".Logger, ...interface {})  [R„ñ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."func(*logrus.Logger, ...interface {})"p^go.weak.type.*func(*"".Logger, ...interface {})€"runtime.zerovalue €Ltype.func(*"".Logger, ...interface {})РLtype.func(*"".Logger, ...interface {})€type.*"".Logger&type.[]interface {}þrgo.string."func(*logrus.Logger, string, ...interface {})"€|-func(*logrus.Logger, string, ...interface {}) rgo.string."func(*logrus.Logger, string, ...interface {})"þ\type.func(*"".Logger, string, ...interface {})°°yéÙõ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Prgo.string."func(*logrus.Logger, string, ...interface {})"pngo.weak.type.*func(*"".Logger, string, ...interface {})€"runtime.zerovalue €\type.func(*"".Logger, string, ...interface {})а\type.func(*"".Logger, string, ...interface {})€type.*"".Loggertype.string &type.[]interface {}þˆgo.string."func(*logrus.Logger, string, interface {}) *logrus.Entry" ’8func(*logrus.Logger, string, interface {}) *logrus.Entry ˆgo.string."func(*logrus.Logger, string, interface {}) *logrus.Entry"þjtype.func(*"".Logger, string, interface {}) *"".EntryÀÀä½òÙ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pˆgo.string."func(*logrus.Logger, string, interface {}) *logrus.Entry"p|go.weak.type.*func(*"".Logger, string, interface 
{}) *"".Entry€"runtime.zerovalue €jtype.func(*"".Logger, string, interface {}) *"".Entryаjtype.func(*"".Logger, string, interface {}) *"".Entry€type.*"".Loggertype.string "type.interface {}°type.*"".Entryþzgo.string."func(*logrus.Logger, logrus.Fields) *logrus.Entry"„1func(*logrus.Logger, logrus.Fields) *logrus.Entry zgo.string."func(*logrus.Logger, logrus.Fields) *logrus.Entry"þTtype.func(*"".Logger, "".Fields) *"".Entry°°ø&©Â3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pzgo.string."func(*logrus.Logger, logrus.Fields) *logrus.Entry"pfgo.weak.type.*func(*"".Logger, "".Fields) *"".Entry€"runtime.zerovalue €Ttype.func(*"".Logger, "".Fields) *"".EntryРTtype.func(*"".Logger, "".Fields) *"".Entry€type.*"".Loggertype."".Fields type.*"".Entryþ^go.string."func(*logrus.Logger) *io.PipeWriter"ph#func(*logrus.Logger) *io.PipeWriter ^go.string."func(*logrus.Logger) *io.PipeWriter"þHtype.func(*"".Logger) *io.PipeWriter  ßv&O3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*logrus.Logger) *io.PipeWriter"pZgo.weak.type.*func(*"".Logger) *io.PipeWriter€"runtime.zerovalue €Htype.func(*"".Logger) *io.PipeWriterÐHtype.func(*"".Logger) *io.PipeWriter€type.*"".Logger&type.*io.PipeWriterþ`go.string."func(*logrus.Logger, *io.PipeReader)"pj$func(*logrus.Logger, *io.PipeReader) `go.string."func(*logrus.Logger, *io.PipeReader)"þJtype.func(*"".Logger, *io.PipeReader)  X2æê3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*logrus.Logger, *io.PipeReader)"p\go.weak.type.*func(*"".Logger, *io.PipeReader)€"runtime.zerovalue €Jtype.func(*"".Logger, *io.PipeReader)РJtype.func(*"".Logger, *io.PipeReader)€type.*"".Logger&type.*io.PipeReaderþ$go.string."Writer"0.Writer $go.string."Writer"þBgo.string."func() *io.PipeWriter"PLfunc() *io.PipeWriter Bgo.string."func() *io.PipeWriter"þ4type.func() *io.PipeWriterYh3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."func() 
*io.PipeWriter"pFgo.weak.type.*func() *io.PipeWriter€"runtime.zerovalue €4type.func() *io.PipeWriterЀ4type.func() *io.PipeWriter€&type.*io.PipeWriterþ2go.string."writerScanner"@< writerScanner 2go.string."writerScanner"þ@go.string."func(*io.PipeReader)"PJfunc(*io.PipeReader) @go.string."func(*io.PipeReader)"þ2type.func(*io.PipeReader)²Ú83 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."func(*io.PipeReader)"pDgo.weak.type.*func(*io.PipeReader)€"runtime.zerovalue €2type.func(*io.PipeReader)Ð2type.func(*io.PipeReader)€&type.*io.PipeReaderþtype.*"".Loggerðð9Á8ì6ª   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*logrus.Logger"p0go.weak.type.**"".Logger€"runtime.zerovaluetype."".Logger` type.*"".LoggerÀðtype.*"".Loggerð"go.string."Debug"4type.func(...interface {}) Ltype.func(*"".Logger, ...interface {})°$"".(*Logger).DebugÀ$"".(*Logger).DebugÐ$go.string."Debugf"ðDtype.func(string, ...interface {})€\type.func(*"".Logger, string, ...interface {})&"".(*Logger).Debugf &"".(*Logger).Debugf°&go.string."Debugln"Ð4type.func(...interface {})àLtype.func(*"".Logger, ...interface {})ð("".(*Logger).Debugln€("".(*Logger).Debugln"go.string."Error"°4type.func(...interface {})ÀLtype.func(*"".Logger, ...interface {})Ð$"".(*Logger).Errorà$"".(*Logger).Errorð$go.string."Errorf"Dtype.func(string, ...interface {}) \type.func(*"".Logger, string, ...interface {})°&"".(*Logger).ErrorfÀ&"".(*Logger).ErrorfÐ&go.string."Errorln"ð4type.func(...interface {})€Ltype.func(*"".Logger, ...interface {})("".(*Logger).Errorln ("".(*Logger).Errorln°"go.string."Fatal"Ð4type.func(...interface {})àLtype.func(*"".Logger, ...interface {})ð$"".(*Logger).Fatal€$"".(*Logger).Fatal$go.string."Fatalf"°Dtype.func(string, ...interface {})À\type.func(*"".Logger, string, ...interface {})Ð&"".(*Logger).Fatalfà&"".(*Logger).Fatalfð&go.string."Fatalln"4type.func(...interface {}) Ltype.func(*"".Logger, ...interface 
{})°("".(*Logger).FatallnÀ("".(*Logger).FatallnÐ go.string."Info"ð4type.func(...interface {})€ Ltype.func(*"".Logger, ...interface {}) """.(*Logger).Info  """.(*Logger).Info° "go.string."Infof"Ð Dtype.func(string, ...interface {})à \type.func(*"".Logger, string, ...interface {})ð $"".(*Logger).Infof€ +$"".(*Logger).Infof +$go.string."Infoln"° +4type.func(...interface {})À +Ltype.func(*"".Logger, ...interface {})Ð +&"".(*Logger).Infolnà +&"".(*Logger).Infolnð +"go.string."Panic" 4type.func(...interface {})  Ltype.func(*"".Logger, ...interface {})° $"".(*Logger).PanicÀ $"".(*Logger).PanicÐ $go.string."Panicf"ð Dtype.func(string, ...interface {})€ \type.func(*"".Logger, string, ...interface {}) &"".(*Logger).Panicf  &"".(*Logger).Panicf° &go.string."Panicln"Ð 4type.func(...interface {})à Ltype.func(*"".Logger, ...interface {})ð ("".(*Logger).Panicln€ ("".(*Logger).Panicln "go.string."Print"° 4type.func(...interface {})À Ltype.func(*"".Logger, ...interface {})Ð $"".(*Logger).Printà $"".(*Logger).Printð $go.string."Printf"Dtype.func(string, ...interface {}) \type.func(*"".Logger, string, ...interface {})°&"".(*Logger).PrintfÀ&"".(*Logger).PrintfÐ&go.string."Println"ð4type.func(...interface {})€Ltype.func(*"".Logger, ...interface {})("".(*Logger).Println ("".(*Logger).Println° go.string."Warn"Ð4type.func(...interface {})àLtype.func(*"".Logger, ...interface {})ð""".(*Logger).Warn€""".(*Logger).Warn"go.string."Warnf"°Dtype.func(string, ...interface {})À\type.func(*"".Logger, string, ...interface {})Ð$"".(*Logger).Warnfà$"".(*Logger).Warnfð&go.string."Warning"4type.func(...interface {}) Ltype.func(*"".Logger, ...interface {})°("".(*Logger).WarningÀ("".(*Logger).WarningÐ(go.string."Warningf"ðDtype.func(string, ...interface {})€\type.func(*"".Logger, string, ...interface {})*"".(*Logger).Warningf *"".(*Logger).Warningf°*go.string."Warningln"Ð4type.func(...interface {})àLtype.func(*"".Logger, ...interface 
{})ð,"".(*Logger).Warningln€,"".(*Logger).Warningln$go.string."Warnln"°4type.func(...interface {})ÀLtype.func(*"".Logger, ...interface {})Ð&"".(*Logger).Warnlnà&"".(*Logger).Warnlnð*go.string."WithField"Rtype.func(string, interface {}) *"".Entry jtype.func(*"".Logger, string, interface {}) *"".Entry°,"".(*Logger).WithFieldÀ,"".(*Logger).WithFieldÐ,go.string."WithFields"ðgo.typelink.[]uintptr/[]uintptrtype.[]uintptrþ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþPgo.string."map.iter[string]interface {}"`Zmap.iter[string]interface {} Pgo.string."map.iter[string]interface {}"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."t"0$t go.string."t"þgo.string."h"0$h go.string."h"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þBtype.map.iter[string]interface {}ððPm8Ÿ (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PPgo.string."map.iter[string]interface {}"pTgo.weak.type.*map.iter[string]interface {}€"runtime.zerovalueÀBtype.map.iter[string]interface {}Àgo.string."key"àtype.*stringgo.string."val"°$type.*interface {}àgo.string."t"€type.*uint8°go.string."h"ÐBtype.*map.hdr[string]interface {}€&go.string."buckets" Htype.*map.bucket[string]interface {}Ð go.string."bptr"ðHtype.*map.bucket[string]interface {} "go.string."other"Àtype.[4]uintptrþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.weak.type.**"".JSONFormatter€"runtime.zerovalue*type."".JSONFormatter` ,type.*"".JSONFormatterÀð,type.*"".JSONFormatterð$go.string."Format"Jtype.func(*"".Entry) ([]uint8, 
error) ptype.func(*"".JSONFormatter, *"".Entry) ([]uint8, error)°4"".(*JSONFormatter).FormatÀ4"".(*JSONFormatter).FormatþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ>type..hashfunc."".TextFormatter6type..hash."".TextFormatterþ:type..eqfunc."".TextFormatter2type..eq."".TextFormatterþ4type..alg."".TextFormatter >type..hashfunc."".TextFormatter:type..eqfunc."".TextFormatterþBgo.string."*logrus.TextFormatter"PL*logrus.TextFormatter Bgo.string."*logrus.TextFormatter"þŽgo.string."func(*logrus.TextFormatter, *logrus.Entry) ([]uint8, error)" ˜;func(*logrus.TextFormatter, *logrus.Entry) ([]uint8, error) Žgo.string."func(*logrus.TextFormatter, *logrus.Entry) ([]uint8, error)"þptype.func(*"".TextFormatter, *"".Entry) ([]uint8, error)ÀÀÚøD¥3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*logrus.TextFormatter, *logrus.Entry) ([]uint8, error)"p‚go.weak.type.*func(*"".TextFormatter, *"".Entry) ([]uint8, error)€"runtime.zerovalue €ptype.func(*"".TextFormatter, *"".Entry) ([]uint8, error)Рptype.func(*"".TextFormatter, *"".Entry) ([]uint8, error)€,type.*"".TextFormattertype.*"".Entry type.[]uint8°type.errorþ˜go.string."func(*logrus.TextFormatter, *bytes.Buffer, string, interface {})"°¢@func(*logrus.TextFormatter, *bytes.Buffer, string, interface {}) ˜go.string."func(*logrus.TextFormatter, *bytes.Buffer, string, interface {})"þ‚type.func(*"".TextFormatter, *bytes.Buffer, string, interface {})ÀÀj¦ú-3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P˜go.string."func(*logrus.TextFormatter, *bytes.Buffer, string, interface {})"p”go.weak.type.*func(*"".TextFormatter, *bytes.Buffer, string, interface {})€"runtime.zerovalue €‚type.func(*"".TextFormatter, *bytes.Buffer, string, interface {})ÐÀ‚type.func(*"".TextFormatter, *bytes.Buffer, string, interface {})€,type.*"".TextFormatter$type.*bytes.Buffer 
type.string°"type.interface {}þžgo.string."func(*logrus.TextFormatter, *bytes.Buffer, *logrus.Entry, []string)"°¨Cfunc(*logrus.TextFormatter, *bytes.Buffer, *logrus.Entry, []string) žgo.string."func(*logrus.TextFormatter, *bytes.Buffer, *logrus.Entry, []string)"þ€type.func(*"".TextFormatter, *bytes.Buffer, *"".Entry, []string)ÀÀéèly3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pžgo.string."func(*logrus.TextFormatter, *bytes.Buffer, *logrus.Entry, []string)"p’go.weak.type.*func(*"".TextFormatter, *bytes.Buffer, *"".Entry, []string)€"runtime.zerovalue €€type.func(*"".TextFormatter, *bytes.Buffer, *"".Entry, []string)ÐÀ€type.func(*"".TextFormatter, *bytes.Buffer, *"".Entry, []string)€,type.*"".TextFormatter$type.*bytes.Buffer type.*"".Entry°type.[]stringþ4go.string."appendKeyValue"@>appendKeyValue 4go.string."appendKeyValue"þjgo.string."func(*bytes.Buffer, string, interface {})"€t)func(*bytes.Buffer, string, interface {}) jgo.string."func(*bytes.Buffer, string, interface {})"þ\type.func(*bytes.Buffer, string, interface {})°°g|3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*bytes.Buffer, string, interface {})"pngo.weak.type.*func(*bytes.Buffer, string, interface {})€"runtime.zerovalue €\type.func(*bytes.Buffer, string, interface {})а\type.func(*bytes.Buffer, string, interface {})€$type.*bytes.Buffertype.string "type.interface {}þ0go.string."printColored"@: printColored 0go.string."printColored"þpgo.string."func(*bytes.Buffer, *logrus.Entry, []string)"€z,func(*bytes.Buffer, *logrus.Entry, []string) pgo.string."func(*bytes.Buffer, *logrus.Entry, []string)"þZtype.func(*bytes.Buffer, *"".Entry, []string)°°À ôÑ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."func(*bytes.Buffer, *logrus.Entry, []string)"plgo.weak.type.*func(*bytes.Buffer, *"".Entry, []string)€"runtime.zerovalue €Ztype.func(*bytes.Buffer, *"".Entry, []string)аZtype.func(*bytes.Buffer, *"".Entry, 
[]string)€$type.*bytes.Buffertype.*"".Entry type.[]stringþ,type.*"".TextFormatter¯l:Ï62   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*logrus.TextFormatter"p>go.weak.type.**"".TextFormatter€"runtime.zerovalue*type."".TextFormatter` ,type.*"".TextFormatterÀð,type.*"".TextFormatterð$go.string."Format"Jtype.func(*"".Entry) ([]uint8, error) ptype.func(*"".TextFormatter, *"".Entry) ([]uint8, error)°4"".(*TextFormatter).FormatÀ4"".(*TextFormatter).FormatÐ4go.string."appendKeyValue"à"go.importpath."".ð\type.func(*bytes.Buffer, string, interface {})€‚type.func(*"".TextFormatter, *bytes.Buffer, string, interface {})D"".(*TextFormatter).appendKeyValue D"".(*TextFormatter).appendKeyValue°0go.string."printColored"À"go.importpath."".ÐZtype.func(*bytes.Buffer, *"".Entry, []string)à€type.func(*"".TextFormatter, *bytes.Buffer, *"".Entry, []string)ð@"".(*TextFormatter).printColored€@"".(*TextFormatter).printColoredþbruntime.gcbits.0x84440000000000000000000000000000 „Dþ@go.string."logrus.TextFormatter"PJlogrus.TextFormatter @go.string."logrus.TextFormatter"þ.go.string."ForceColors"@8 ForceColors .go.string."ForceColors"þ2go.string."DisableColors"@< DisableColors 2go.string."DisableColors"þ8go.string."DisableTimestamp"PBDisableTimestamp 8go.string."DisableTimestamp"þ2go.string."FullTimestamp"@< FullTimestamp 2go.string."FullTimestamp"þ4go.string."DisableSorting"@>DisableSorting 4go.string."DisableSorting"þ2go.string."TextFormatter"@< TextFormatter 2go.string."TextFormatter"þ*type."".TextFormatterðð {,˜£, 4type..alg."".TextFormatter0bruntime.gcbits.0x84440000000000000000000000000000P@go.string."logrus.TextFormatter"p,type.*"".TextFormatter€"runtime.zerovalueÀ*type."".TextFormatterÀ.go.string."ForceColors"àtype.bool2go.string."DisableColors"°type.boolà8go.string."DisableTimestamp"€type.bool°2go.string."FullTimestamp"Ðtype.bool€6go.string."TimestampFormat" type.stringÐ4go.string."DisableSorting"ðtype.bool` *type."".TextFormatter 
2go.string."TextFormatter"°"go.importpath."".Àð*type."".TextFormatterþ*go.string."[20]uint8"@4 [20]uint8 *go.string."[20]uint8"þtype.[20]uint8ÀÀ~¨…‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."[20]uint8"p.go.weak.type.*[20]uint8€"runtime.zerovaluetype.uint8 type.[]uint8þ>go.typelink.[20]uint8/[20]uint8type.[20]uint8þ(go.string."[4]uint8"@2[4]uint8 (go.string."[4]uint8"þtype.[4]uint8ÀÀ„B‘ € runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P(go.string."[4]uint8"p,go.weak.type.*[4]uint8€"runtime.zerovaluetype.uint8 type.[]uint8þ:go.typelink.[4]uint8/[4]uint8type.[4]uint8þ6go.string."*logrus.Termios"@@*logrus.Termios 6go.string."*logrus.Termios"þ type.*"".Termios  ÐP­¼6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*logrus.Termios"p2go.weak.type.**"".Termios€"runtime.zerovaluetype."".Termiosþ4go.string."logrus.Termios"@>logrus.Termios 4go.string."logrus.Termios"þ"go.string."Iflag"0,Iflag "go.string."Iflag"þ"go.string."Oflag"0,Oflag "go.string."Oflag"þ"go.string."Cflag"0,Cflag "go.string."Cflag"þ"go.string."Lflag"0,Lflag "go.string."Lflag"þgo.string."Cc"0&Cc go.string."Cc"þ*go.string."Pad_cgo_0"@4 Pad_cgo_0 *go.string."Pad_cgo_0"þ$go.string."Ispeed"0.Ispeed $go.string."Ispeed"þ$go.string."Ospeed"0.Ospeed $go.string."Ospeed"þ&go.string."Termios"00Termios &go.string."Termios"þtype."".TermiosHôÛ†i™ 48@4  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P4go.string."logrus.Termios"p type.*"".Termios€"runtime.zerovalueÀtype."".TermiosÀ"go.string."Iflag"àtype.uint64"go.string."Oflag"°type.uint64à"go.string."Cflag"€type.uint64°"go.string."Lflag"Ðtype.uint64€go.string."Cc" type.[20]uint8Ð*go.string."Pad_cgo_0"ðtype.[4]uint8 
$go.string."Ispeed"Àtype.uint64ð$go.string."Ospeed"type.uint64`Àtype."".TermiosÀ&go.string."Termios"Ð"go.importpath."".àtype."".TermiosþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ +__.PKGDEF 0 0 0 644 19986 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package opts + import net "net" + import runtime "runtime" + import bufio "bufio" + import volume "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume" + import ulimit "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" + import os "os" + import strings "strings" + import fmt "fmt" + import regexp "regexp" + import parsers "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" + import path "path" + import syntax "regexp/syntax" // indirect + type @"regexp/syntax".InstOp uint8 + func (@"regexp/syntax".i·2 @"regexp/syntax".InstOp) String () (? string) { if uint(@"regexp/syntax".i·2) >= uint(len(@"regexp/syntax".instOpNames)) { return "" }; return @"regexp/syntax".instOpNames[@"regexp/syntax".i·2] } + type @"regexp/syntax".Inst struct { Op @"regexp/syntax".InstOp; Out uint32; Arg uint32; Rune []rune } + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchEmptyWidth (@"regexp/syntax".before·3 rune, @"regexp/syntax".after·4 rune) (? bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRune (@"regexp/syntax".r·3 rune) (? bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRunePos (@"regexp/syntax".r·3 rune) (? int) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") String () (? string) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") @"regexp/syntax".op () (? 
@"regexp/syntax".InstOp) + type @"regexp/syntax".EmptyOp uint8 + type @"regexp/syntax".Prog struct { Inst []@"regexp/syntax".Inst; Start int; NumCap int } + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x0") Prefix () (@"regexp/syntax".prefix·1 string, @"regexp/syntax".complete·2 bool) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") StartCond () (? @"regexp/syntax".EmptyOp) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") String () (? string) + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x1") @"regexp/syntax".skipNop (@"regexp/syntax".pc·4 uint32) (? *@"regexp/syntax".Inst, ? uint32) + type @"regexp".onePassInst struct { ? @"regexp/syntax".Inst; Next []uint32 } + type @"regexp".onePassProg struct { Inst []@"regexp".onePassInst; Start int; NumCap int } + import sync "sync" // indirect + type @"sync".Mutex struct { @"sync".state int32; @"sync".sema uint32 } + func (@"sync".m·1 *@"sync".Mutex) Lock () + func (@"sync".m·1 *@"sync".Mutex) Unlock () + type @"regexp".thread struct { @"regexp".inst *@"regexp/syntax".Inst; @"regexp".cap []int } + type @"regexp".entry struct { @"regexp".pc uint32; @"regexp".t *@"regexp".thread } + type @"regexp".queue struct { @"regexp".sparse []uint32; @"regexp".dense []@"regexp".entry } + type @"regexp".inputBytes struct { @"regexp".str []byte } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputBytes "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? 
int) + type @"regexp".inputString struct { @"regexp".str string } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputString "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + import io "io" // indirect + type @"io".RuneReader interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error) } + type @"regexp".inputReader struct { @"regexp".r @"io".RuneReader; @"regexp".atEOT bool; @"regexp".pos int } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) { return @"regexp/syntax".EmptyOp(0x0) } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) { return -0x1 } + func (@"regexp".i·3 *@"regexp".inputReader) @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + type @"regexp".input interface { @"regexp".canCheckPrefix() (? bool); @"regexp".context(@"regexp".pos int) (? @"regexp/syntax".EmptyOp); @"regexp".hasPrefix(@"regexp".re *@"regexp".Regexp) (? bool); @"regexp".index(@"regexp".re *@"regexp".Regexp, @"regexp".pos int) (? 
int); @"regexp".step(@"regexp".pos int) (@"regexp".r rune, @"regexp".width int) } + type @"regexp".machine struct { @"regexp".re *@"regexp".Regexp; @"regexp".p *@"regexp/syntax".Prog; @"regexp".op *@"regexp".onePassProg; @"regexp".q0 @"regexp".queue; @"regexp".q1 @"regexp".queue; @"regexp".pool []*@"regexp".thread; @"regexp".matched bool; @"regexp".matchcap []int; @"regexp".inputBytes @"regexp".inputBytes; @"regexp".inputString @"regexp".inputString; @"regexp".inputReader @"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".add (@"regexp".q·3 *@"regexp".queue, @"regexp".pc·4 uint32, @"regexp".pos·5 int, @"regexp".cap·6 []int "esc:0x0", @"regexp".cond·7 @"regexp/syntax".EmptyOp, @"regexp".t·8 *@"regexp".thread) (? *@"regexp".thread) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".alloc (@"regexp".i·3 *@"regexp/syntax".Inst) (? *@"regexp".thread) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".clear (@"regexp".q·2 *@"regexp".queue) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".free (@"regexp".t·2 *@"regexp".thread) { @"regexp".m·1.@"regexp".inputBytes.@"regexp".str = nil; @"regexp".m·1.@"regexp".inputString.@"regexp".str = ""; @"regexp".m·1.@"regexp".inputReader.@"regexp".r = nil; @"regexp".m·1.@"regexp".pool = append(@"regexp".m·1.@"regexp".pool, @"regexp".t·2) } + func (@"regexp".m·1 *@"regexp".machine) @"regexp".init (@"regexp".ncap·2 int) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".match (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? bool) + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputBytes (@"regexp".b·3 []byte) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputBytes.@"regexp".str = @"regexp".b·3; return &@"regexp".m·2.@"regexp".inputBytes } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputReader (@"regexp".r·3 @"io".RuneReader) (? 
@"regexp".input) { @"regexp".m·2.@"regexp".inputReader.@"regexp".r = @"regexp".r·3; @"regexp".m·2.@"regexp".inputReader.@"regexp".atEOT = false; @"regexp".m·2.@"regexp".inputReader.@"regexp".pos = 0x0; return &@"regexp".m·2.@"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputString (@"regexp".s·3 string) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputString.@"regexp".str = @"regexp".s·3; return &@"regexp".m·2.@"regexp".inputString } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".onepass (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? bool) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".step (@"regexp".runq·2 *@"regexp".queue, @"regexp".nextq·3 *@"regexp".queue, @"regexp".pos·4 int, @"regexp".nextPos·5 int, @"regexp".c·6 rune, @"regexp".nextCond·7 @"regexp/syntax".EmptyOp) + type @"regexp".Regexp struct { @"regexp".expr string; @"regexp".prog *@"regexp/syntax".Prog; @"regexp".onepass *@"regexp".onePassProg; @"regexp".prefix string; @"regexp".prefixBytes []byte; @"regexp".prefixComplete bool; @"regexp".prefixRune rune; @"regexp".prefixEnd uint32; @"regexp".cond @"regexp/syntax".EmptyOp; @"regexp".numSubexp int; @"regexp".subexpNames []string; @"regexp".longest bool; @"regexp".mu @"sync".Mutex; @"regexp".machine []*@"regexp".machine } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") Expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 []byte "esc:0x0", @"regexp".src·5 []byte "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") ExpandString (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".src·5 string "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) Find (@"regexp".b·3 []byte) (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAll (@"regexp".b·3 []byte, @"regexp".n·4 int) (? 
[][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllString (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatch (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatchIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatch (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatchIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindIndex (@"regexp".b·3 []byte) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderIndex (@"regexp".r·3 @"io".RuneReader) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderSubmatchIndex (@"regexp".r·3 @"io".RuneReader) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindString (@"regexp".s·3 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringIndex (@"regexp".s·3 string) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatch (@"regexp".s·3 string) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatchIndex (@"regexp".s·3 string) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatch (@"regexp".b·3 []byte) (? [][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatchIndex (@"regexp".b·3 []byte) (? 
[]int) + func (@"regexp".re·3 *@"regexp".Regexp "esc:0x1") LiteralPrefix () (@"regexp".prefix·1 string, @"regexp".complete·2 bool) { return @"regexp".re·3.@"regexp".prefix, @"regexp".re·3.@"regexp".prefixComplete } + func (@"regexp".re·1 *@"regexp".Regexp "esc:0x0") Longest () { @"regexp".re·1.@"regexp".longest = true } + func (@"regexp".re·2 *@"regexp".Regexp) Match (@"regexp".b·3 []byte) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchReader (@"regexp".r·3 @"io".RuneReader) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchString (@"regexp".s·3 string) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") NumSubexp () (? int) { return @"regexp".re·2.@"regexp".numSubexp } + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAll (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllFunc (@"regexp".src·3 []byte, @"regexp".repl·4 func(? []byte) (? []byte) "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteral (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteralString (@"regexp".src·3 string, @"regexp".repl·4 string "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllString (@"regexp".src·3 string, @"regexp".repl·4 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllStringFunc (@"regexp".src·3 string, @"regexp".repl·4 func(? string) (? string) "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) Split (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") String () (? string) { return @"regexp".re·2.@"regexp".expr } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") SubexpNames () (? 
[]string) { return @"regexp".re·2.@"regexp".subexpNames } + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".allMatches (@"regexp".s·2 string, @"regexp".b·3 []byte, @"regexp".n·4 int, @"regexp".deliver·5 func(? []int) "esc:0x0") + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".doExecute (@"regexp".r·3 @"io".RuneReader, @"regexp".b·4 []byte, @"regexp".s·5 string, @"regexp".pos·6 int, @"regexp".ncap·7 int) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".bsrc·5 []byte "esc:0x0", @"regexp".src·6 string "esc:0x0", @"regexp".match·7 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".get () (? *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".pad (@"regexp".a·3 []int "esc:0x2") (? []int) + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".put (@"regexp".z·2 *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".replaceAll (@"regexp".bsrc·3 []byte, @"regexp".src·4 string, @"regexp".nmatch·5 int, @"regexp".repl·6 func(@"regexp".dst []byte, @"regexp".m []int) (? []byte) "esc:0x0") (? []byte) + var @"".EnvironmentVariableRegexp *@"regexp".Regexp + func @"".ParseEnvFile (@"".filename·3 string) (? []string, ? error) + type @"".ErrBadEnvVariable struct { @"".msg string } + func (@"".e·2 @"".ErrBadEnvVariable) Error () (? string) + var @"".DefaultHost string + type @"net".IPMask []byte + func (@"net".m·3 @"net".IPMask "esc:0x0") Size () (@"net".ones·1 int, @"net".bits·2 int) + func (@"net".m·2 @"net".IPMask "esc:0x0") String () (? string) + type @"net".IP []byte + func (@"net".ip·2 @"net".IP "esc:0x0") DefaultMask () (? @"net".IPMask) + func (@"net".ip·2 @"net".IP "esc:0x0") Equal (@"net".x·3 @"net".IP "esc:0x0") (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsGlobalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsInterfaceLocalMulticast () (? 
bool) { return len(@"net".ip·2) == 0x10 && @"net".ip·2[0x0] == byte(0xFF) && @"net".ip·2[0x1] & byte(0xF) == byte(0x1) } + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLoopback () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsUnspecified () (? bool) + func (@"net".ip·3 @"net".IP "esc:0x0") MarshalText () (? []byte, ? error) + func (@"net".ip·2 @"net".IP "esc:0x0") Mask (@"net".mask·3 @"net".IPMask "esc:0x0") (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x0") String () (? string) + func (@"net".ip·2 @"net".IP "esc:0x2") To16 () (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x2") To4 () (? @"net".IP) + func (@"net".ip·2 *@"net".IP "esc:0x0") UnmarshalText (@"net".text·3 []byte "esc:0x0") (? error) + type @"".IpOpt struct { ? *@"net".IP } + func (@"".o·2 *@"".IpOpt "esc:0x0") Set (@"".val·3 string) (? error) + func (@"".o·2 *@"".IpOpt "esc:0x0") String () (? string) + func @"".NewIpOpt (@"".ref·2 *@"net".IP, @"".defaultVal·3 string) (? *@"".IpOpt) + var @"".DefaultHTTPHost string + var @"".DefaultHTTPPort int + var @"".DefaultUnixSocket string + type @"".ValidatorFctType func(@"".val string) (? string, ? error) + type @"".ListOpts struct { @"".values *[]string; @"".validator @"".ValidatorFctType } + func (@"".opts·1 *@"".ListOpts) Delete (@"".key·2 string "esc:0x0") + func (@"".opts·2 *@"".ListOpts "esc:0x0") Get (@"".key·3 string "esc:0x0") (? bool) + func (@"".opts·2 *@"".ListOpts "esc:0x1") GetAll () (? []string) { return *@"".opts·2.@"".values } + func (@"".opts·2 *@"".ListOpts "esc:0x0") GetMap () (? map[string]struct {}) + func (@"".opts·2 *@"".ListOpts "esc:0x0") Len () (? int) { return len(*@"".opts·2.@"".values) } + func (@"".opts·2 *@"".ListOpts) Set (@"".value·3 string) (? error) + func (@"".opts·2 *@"".ListOpts) String () (? 
string) + func @"".NewListOpts (@"".validator·2 @"".ValidatorFctType) (? @"".ListOpts) + func @"".NewListOptsRef (@"".values·2 *[]string, @"".validator·3 @"".ValidatorFctType) (? *@"".ListOpts) { return (&@"".ListOpts{ @"".values:@"".values·2, @"".validator:@"".validator·3 }) } + type @"".MapOpts struct { @"".values map[string]string; @"".validator @"".ValidatorFctType } + func (@"".opts·2 *@"".MapOpts "esc:0x0") Set (@"".value·3 string) (? error) + func (@"".opts·2 *@"".MapOpts) String () (? string) + func @"".NewMapOpts (@"".values·2 map[string]string, @"".validator·3 @"".ValidatorFctType) (? *@"".MapOpts) { if @"".values·2 == nil { @"".values·2 = make(map[string]string, 0x0) }; return (&@"".MapOpts{ @"".values:@"".values·2, @"".validator:@"".validator·3 }) } + type @"".ValidatorFctListType func(@"".val string) (? []string, ? error) + func @"".ValidateAttach (@"".val·3 string "esc:0x2") (? string, ? error) + func @"".ValidateLink (@"".val·3 string) (? string, ? error) + func @"".ValidateDevice (@"".val·3 string) (? string, ? error) + func @"".ValidatePath (@"".val·3 string) (? string, ? error) + func @"".ValidateEnv (@"".val·3 string) (? string, ? error) + func @"".ValidateIPAddress (@"".val·3 string) (? string, ? error) + func @"".ValidateMACAddress (@"".val·3 string "esc:0x2") (? string, ? error) + func @"".ValidateDNSSearch (@"".val·3 string) (? string, ? error) + func @"".ValidateExtraHost (@"".val·3 string) (? string, ? error) + func @"".ValidateLabel (@"".val·3 string) (? string, ? error) + func @"".ValidateHost (@"".val·3 string) (? string, ? 
error) + type @"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Rlimit struct { Type int "json:\"type,omitempty\""; Hard uint64 "json:\"hard,omitempty\""; Soft uint64 "json:\"soft,omitempty\"" } + type @"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit struct { Name string; Hard int64; Soft int64 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".u·3 *@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit) GetRlimit () (? *@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Rlimit, ? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".u·2 *@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit) String () (? string) + type @"".UlimitOpt struct { @"".values *map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit } + func (@"".o·2 *@"".UlimitOpt "esc:0x0") GetList () (? []*@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit) + func (@"".o·2 *@"".UlimitOpt "esc:0x0") Set (@"".val·3 string) (? error) + func (@"".o·2 *@"".UlimitOpt "esc:0x0") String () (? string) + func @"".NewUlimitOpt (@"".ref·2 *map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit) (? *@"".UlimitOpt) { if @"".ref·2 == nil { @"".ref·2 = (&map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit".Ulimit{ }) }; return (&@"".UlimitOpt{ @"".values:@"".ref·2 }) } + func @"".init () + var @"regexp/syntax".instOpNames []string + +$$ +_go_.6 0 0 0 644 198312 ` +go object darwin amd64 go1.4.2 X:precisestack + +! 
+go13ldbufio.a +fmt.aos.aregexp.astrings.a +net.a path.a¢github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.a˜github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.a github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.aþ"".ParseEnvFile€7î6eH‹ %H„$hþÿÿH;AwèëâHìHÇ„$0HÇ„$8HÇ„$@HÇ„$HHÇ„$PH‹œ$ H‰$H‹œ$(H‰\$èH‹T$H‹L$H‹\$ H‰œ$àHƒùH‰Œ$Ø„HH‰$èH‹t$Hƒþtb1í1ÒH‰´$°H‰´$0H‰¬$¸H‰¬$8H‰”$ÀH‰”$@H‹œ$ØH‰œ$HH‹œ$àH‰œ$PèHÄÉëšH‰T$PH‰$H QjèYYH…À…] HH‰$èH‹\$Hƒû„7 1É1ÒH‰œ$hH‰Œ$pH‰”$xH‹\$PH‰\$`H‹1íH9è„Í H‹L$`H‰„$H‰„$¨H‰Œ$ H‰Œ$°HH‰$HÇD$HÇD$èH‹\$H‰œ$àH‹\$ H‰œ$èH‹\$(H‰œ$ðHH‰$èH‹|$H‰ùHƒÿ„6 1ÀèH‰L$XH‰ $Hƒ<$„ H‹œ$¨H‰\$H‹œ$°H‰\$èH‹\$XH‰$Hƒ<$„Ð +Hƒ$HH‰\$èH‹D$XHÇ@H‰$Hƒ<$„’ +Hƒ$8H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$èH‹\$XH‰\$HH‹\$HH‰$èH‹T$H¶\$€û„8 HÇD$hHÇD$pHƒú„ Hj H$H‰ßH‰îH¥H¥H¥èL‹L$H‹t$ L‰Œ$H‰´$L‰L$hL‰Œ$ÈH‰t$pH‰´$ÐHƒþŽ«L‰Œ$˜HH‹;H‰¼$¸H‹CH‰´$ H‰„$ÀH9ÆŒyH9Æ‚wL‰Œ$H‰„$H9À…WL‰ $H‰D$H‰|$H‰D$èL‹Œ$ÈH‹´$ж\$ €û„!HÇÀ<… L‰ $H‰t$HHl$H‰ïH‰ÞH¥H¥HÇD$ èH‹T$(H‹L$0H‹D$8H‰„$H‰”$€HƒùH‰Œ$ˆ†¨H,$H‰ïH‰ÖH¥H¥H‹H‰\$H‹H‰\$èH‹T$ H‹L$(H‹H‰$H‰”$ˆH‰T$H‰Œ$H‰L$èH‹¬$ˆH‹”$¶\$€û…ñH‰¬$HH‰”$PHœ$XHÇHÇCHœ$XHƒû„±HÇÅHÇÂH‰œ$˜H‰¬$ H‰”$¨HH‰$Hœ$HH‰\$èH‹T$H‹L$H‹œ$˜H‰$H‰”$øH‰T$H‰Œ$H‰L$èHH,$H‰ïH‰ÞH¥H¥H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$ èH‹T$(H‹L$0Hœ$8HÇHÇCH‰”$H‰”$8H‰Œ$H‰Œ$@HH‰$èH‹t$Hƒþ„‘1Ò1íH‰´$°H‰´$0H‰”$¸H‰”$8H‰¬$ÀH‰¬$@HH‰$HH‰\$HH‰\$Hœ$8H‰\$èH‹\$ H‰œ$HH‹\$(H‰œ$PèHÄÉéhÿÿÿ‰éHþÿÿH‹œ$ˆHƒûŽqH‰¬$HH‰”$PH¼$ø1ÀèHœ$øHƒû„9HÇÁHÇÂH‰œ$˜H‰Œ$ H‰”$¨HH‰$Hœ$HH‰\$èH‹L$H‹D$H‹œ$˜H‰$H‰Œ$øH‰L$H‰„$H‰D$èHH‰$H‹œ$€Hƒ¼$ˆ†˜HƒÃH‰\$èH‹L$H‹D$H‹œ$˜HƒÃH‰$H‰Œ$øH‰L$H‰„$H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$ èH‹\$(H‰œ$H‹\$0H‰œ$H‹”$hH‹Œ$pH‹œ$xH‰ØH)ËHƒû}OHH‰$H‰”$°H‰T$H‰Œ$¸H‰L$H‰„$ÀH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰ÓH‰¬$¸H‰„$ÀH‰”$°H‰ÍHkíHëH‰$H‹œ$H‰\$H‹œ$H‰\$èH‹”$°H‹Œ$¸H‹„$ÀH‰”$hH‰Œ$pH‰„$xéùÿÿè ‰éÀýÿÿH‹œ$ÈH‰$H‹œ$ÐH‰\$èH‹\$H‰œ$HH‹\$H‰œ$PH‹œ$ÈH‰$H‹œ$ÐH‰\$èH‹\$H‰œ$(H‹\$H‰œ$0H¼$ø1ÀèHœ$øHƒû„HÇÂHÇÁH‰œ$˜H‰”$ H‰Œ$¨HH‰$Hœ$HH‰\$èH‹L$H‹D$H‹œ$˜H‰$H‰Œ$øH‰L$H‰„$H‰D$èHH‰$Hœ$(H‰\$èH‹L$H‹D$H‹œ$˜HƒÃH‰$H‰Œ$øH‰L$H‰„$H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$˜H‰\$H‹œ$ 
H‰\$H‹œ$¨H‰\$ èH‹\$(H‰œ$H‹\$0H‰œ$H‹”$hH‹Œ$pH‹œ$xH‰ØH)ËHƒû}OHH‰$H‰”$ÈH‰T$H‰Œ$ÐH‰L$H‰„$ØH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰ÓH‰¬$ÐH‰„$ØH‰”$ÈH‰ÍHkíHëH‰$H‹œ$H‰\$H‹œ$H‰\$èH‹”$ÈH‹Œ$ÐH‹„$ØH‰”$hH‰Œ$pH‰„$xéÏöÿÿ‰éÚýÿÿè é¼öÿÿ1Àéß÷ÿÿè ‰éäöÿÿH‰ÖHÇD$xHÇ„$€H‰T$@Hƒú„ÐH‹J`H‹RhH‰”$ðH‰Œ$èH‹-H9é…”H‰ $H‰T$H‹-H‰l$H‹-H‰l$èH‹t$@¶\$ €ût_1Ò1ÉH‹œ$hH‰œ$0H‹œ$pH‰œ$8H‹œ$xH‰œ$@H‰T$xH‰”$HH‰Œ$€H‰Œ$PèHÄÃHƒþt +H‹V`H‹Nh땉ëò‰é)ÿÿÿ‰%ébõÿÿ‰%é$õÿÿ‰%éåôÿÿ‰éÃôÿÿHH‰$HH‰\$HH‰\$èH‹D$éôÿÿ‰éÂóÿÿèHÄä +00runtime.morestack_noctxtöos.OpenÖtype.[0]stringè"runtime.newobject²&runtime.deferreturnê&os.(*File).Close·fú"runtime.deferprocžtype.[0]string°"runtime.newobject¨4go.itab.*os.File.io.Reader–type.[]uint8Ì"runtime.makeslice¨$type.bufio.Scannerº"runtime.newobjectìÄ runtime.duffzeroÒ 2runtime.writebarrieriface’ +$bufio.ScanLines·f¦ +.runtime.writebarrierptrÀ 2runtime.writebarriersliceð *bufio.(*Scanner).Scan€ 2runtime.slicebytetostringšgo.string."#"Þ runtime.eqstringØgo.string."="’strings.SplitNž"".whiteSpaces¶"".whiteSpacesÊ strings.TrimLeftì8"".EnvironmentVariableRegexp²8regexp.(*Regexp).MatchStringºtype.stringæruntime.convT2EÐ2runtime.writebarrierifaceÞzgo.string."variable '%s' is not a valid environment variable"Òfmt.Sprintfâtype.[0]stringô"runtime.newobjectˆ2type."".ErrBadEnvVariabležtype.error¶Dgo.itab."".ErrBadEnvVariable.erroräruntime.convT2I¤&runtime.deferreturn²ð runtime.duffzero°type.stringÜruntime.convT2EÆ2runtime.writebarrierifaceÔtype.string¦runtime.convT2E˜ 2runtime.writebarrieriface¦ "go.string."%s=%s"š!fmt.Sprintf¤"type.[]string–#"runtime.growsliceÐ$4runtime.writebarrierstringÄ%$runtime.panicindex’&"strings.TrimSpace‚'os.GetenvÔ'ð runtime.duffzeroÒ(type.stringþ(runtime.convT2Eè)2runtime.writebarrierifaceö)type.string¢*runtime.convT2E”+2runtime.writebarrieriface¢+"go.string."%s=%s"–,fmt.Sprintf -type.[]string’."runtime.growsliceÌ/4runtime.writebarrierstringÎ0$runtime.panicindexô0$runtime.panicslice’2 io.EOFÄ2 io.EOFÜ2 
io.EOFð2runtime.ifaceeq¼4&runtime.deferreturnæ5type.*os.Fileü5type.io.Reader”64go.itab.*os.File.io.Reader¨6 runtime.typ2ItabÖ6&runtime.deferreturnp°‚"".autotmp_0060ßtype.error"".autotmp_0059type.uint64"".autotmp_0058type.uint64"".autotmp_0057type.int"".autotmp_0056type.int"".autotmp_0055type.[]string"".autotmp_0054"type.interface {}"".autotmp_0053"type.interface {}"".autotmp_0052*type.*[2]interface {}"".autotmp_0051&type.[]interface {}"".autotmp_0050type.uint64"".autotmp_0049type.uint64"".autotmp_0048type.int"".autotmp_0047type.int"".autotmp_0046type.[]string"".autotmp_0045"type.interface {}"".autotmp_0044"type.interface {}"".autotmp_0042&type.[]interface {}"".autotmp_0041type.*[0]string"".autotmp_0040type.[]string"".autotmp_0039¿"type.interface {}"".autotmp_0037ÿ&type.[]interface {}"".autotmp_0034type.string"".autotmp_0032Ÿtype.string"".autotmp_0031ÿ&type.*bufio.Scanner"".autotmp_0030&type.*bufio.Scanner"".autotmp_0028ÿtype.io.Reader"".autotmp_0027type.*[0]string"".autotmp_0025Ïtype.[]string"".autotmp_0024type.[]string"".autotmp_0023type.string"".autotmp_0022ßtype.string"".autotmp_0021type.string"".autotmp_0020(type.[2]interface {}"".autotmp_0019Ÿtype.[]string"".autotmp_0018type.string"".autotmp_0017type.string"".autotmp_0016?(type.[2]interface {}"".autotmp_0015type.int"".autotmp_0014¿2type."".ErrBadEnvVariable"".autotmp_0013type.string"".autotmp_0012Ÿtype.string"".autotmp_0011ÿ(type.[1]interface {}"".autotmp_0009type.string"".autotmp_0008type.[]string"".autotmp_0005type.int"".autotmp_0004type.int"".autotmp_0002otype.[]uint8"".autotmp_0001ïtype.*os.File "".~r0¿type.errorbufio.s·2¯&type.*bufio.Scanner"strings.prefix·3¿type.stringstrings.s·2ÿtype.string "".~r0ßtype.stringbufio.r·2ßtype.io.Reader"".variableŸtype.string"".data¯type.[]string"".lineŸtype.string"".scannerŸ&type.*bufio.Scanner"".linesßtype.[]string "".errÿtype.error +"".fhtype.*os.File "".~r2Ptype.error "".~r1 type.[]string"".filenametype.stringN%°ÿ¯°š ¯°Ë ¯°Œ¯ +À„(a5#;ç!a°DOHñÞ«#$ *þ-] 
z9d%Ž7Lw÷¶4švQxez~]:'8~Ì~]A9‘1 +> +h(Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9f²/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.goþ"".NewListOptsÀ®eH‹ %H;awèëêHƒì0HÇD$@HÇD$HHH‰$èH‹D$H‹L$8H‰L$HÇD$ HÇD$(HL$ H‰L$H‰ $Hƒ<$t\H‰D$èH‹\$H‰$Hƒ<$t9Hƒ$H‹\$H‰\$èH‹\$HƒûtH‹+H‰l$@H‹kH‰l$HHƒÄ0Éëæ‰%뾉%ë› + 0runtime.morestack_noctxt^type.[]stringp"runtime.newobjectð.runtime.writebarrierptr¸.runtime.writebarrierptr0` "".autotmp_0111 type."".ListOpts"".autotmp_0110/"type.*"".ListOpts"".autotmp_0109"type.*"".ListOpts"".validator?0type."".ValidatorFctType "".~r1 type."".ListOpts"".validator0type."".ValidatorFctType`¦_`àL,Ÿ7@$ETgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·9307bf1379da22b408b9b243276c0115¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ""".NewListOptsRefÀ¦eH‹ %H;awèëêHƒìHH‰$èH‹D$H‰D$H‰$Hƒ<$tKH‹\$ H‰\$èH‹\$H‰$Hƒ<$t#Hƒ$H‹\$(H‰\$èH‹\$H‰\$0HƒÄÉ%ëÔ‰%ë¬ + 0runtime.morestack_noctxt: type."".ListOptsL"runtime.newobject”.runtime.writebarrierptrÜ.runtime.writebarrierptr00"".autotmp_0112"type.*"".ListOpts "".~r2 "type.*"".ListOpts"".validator0type."".ValidatorFctType"".valuestype.*[]string0f/0 VX%$WTgclocals·bd51743682bd6c0f7b9f2e8e6dffed99Tgclocals·e1ae6533a9e39048ba0735a2264ce16a¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ*"".(*ListOpts).StringÀ¸eH‹ %HD$øH;AwèëåHìˆHÇ„$˜HÇ„$ H‹œ$H‹+Hƒý„H‹]H‰\$pH‹]H‰\$xH‹]H‰œ$€H\$HHÇHÇCH\$HHƒû„½HÇÂHÇÁH‰\$XH‰T$`H‰L$hHH‰$H\$pH‰\$èH‹L$H‹D$H‹\$XH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$ èH‹L$(H‹D$0H‰Œ$˜H‰„$ HĈÉé<ÿÿÿ‰Eéóþÿÿ +*0runtime.morestack_noctxtàtype.[]string†runtime.convT2EÞ2runtime.writebarrierifaceìgo.string."%v"Îfmt.Sprintf0 "".autotmp_0118Ÿ"type.interface 
{}"".autotmp_0116_&type.[]interface {}"".autotmp_0114/type.[]string"".autotmp_0113(type.[1]interface {} "".~r0type.string"".opts"type.*"".ListOpts"ªà d:¦Âd:Tgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·7876b70d8da64fa07ca2fd3ecc71f905¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ$"".(*ListOpts).SetÀªeH‹ %HD$ðH;AwèëåHìH‹´$˜HÇ„$°HÇ„$¸H‹^1íH9ë„€H‹œ$ H‰$H‹œ$¨H‰\$H‹VH‹ÿÓH‹´$˜H‹l$H‹T$H‹D$ H‹L$(H‰L$XHƒøH‰D$PtH‰„$°H‰Œ$¸HÄÃH‰l$@H‰¬$ H‰T$HH‰”$¨H‹.Hƒý„/H‹UH‹MH‹]H‰T$xH‰Œ$€H‰œ$ˆH‰ØH)ËHƒû}FHH‰$H‰T$`H‰T$H‰L$hH‰L$H‰D$pH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$hH‰D$pH‰ÓH‰T$`H‰ÍHkíHëH‰$H‹œ$ H‰\$H‹œ$¨H‰\$èH‹T$`H‹L$hH‹D$pH‹œ$˜H‹+H‰,$Hƒ<$tIH‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$èHÇ„$°HÇ„$¸HÄÉ%뮉EéÉþÿÿ +*0runtime.morestack_noctxtâ +œtype.[]stringü"runtime.growslice¤4runtime.writebarrierstringÀ2runtime.writebarriersliceP "".autotmp_0124_type.[]string"".autotmp_0123/type.[]string"".autotmp_0122type.[]string "".errtype.error"".vŸtype.string "".~r10type.error"".valuetype.string"".opts"type.*"".ListOpts(" ”Ÿ ÌŸ  (pBC “ q NATgclocals·61fa3b017c2e156e481b3d912c20f49bTgclocals·1509598f597bd125bdfd9d44972821c7¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ*"".(*ListOpts).DeleteÀ ¬ eH‹ %HD$ H;AwèëåHìàH‹¼$øH‹œ$èH‹+Hƒý„ÏH‹UH‹MH‹]H‰œ$Ø1ÀH‰Œ$ÐH‰L$HH‰”$ÈH‹l$HH9è‡H‰T$XHƒú„€H‹2H‹JH‰D$PH‰D$@H‰t$pH‰L$xH9ù…?H‰t$`H‰4$H‰L$hH‰L$H‹¬$ðH‰l$H‰|$èL‹œ$èH‹¼$øH‹T$XH‹D$P¶\$ €û„íI‹+H‹MH‹D$@H9Á‚ÑI‹+L‹UI‰ÁH‹D$@I‰ÈHÿÀI‹+H‹}I‹+H‹UH9‚žH‰ÆI‹+H‹MH)ÂH‰ûH)ÃH‰ØHƒût H‰óHÁãHËH‰ÙH‰Œ$˜H‰×H‰„$¨L‰”$°L‰ÖL‰Œ$¸L‰„$ÀL‰ÈL‰Œ$ˆH‰”$ HÐL‰„$L)ÀHƒø~[HH‰$H‰´$€H‰t$L‰L$L‰D$H‰D$ èL‹Œ$¸H‹¼$ H‹t$(H‹\$0H‰œ$ˆH‹\$8H‰œ$L‰ËH‰õH‰´$€HkÛHÝH‰,$H‹œ$˜H‰\$H‰ûHÁãH‰\$èH‹”$¸H‹¬$ H‹„$H‹´$€HêH‰´$€H‰”$ˆH‰„$H‹œ$èH‹+H‰,$Hƒ<$t4H‰´$˜H‰t$H‰”$ H‰T$H‰„$¨H‰D$èHÄàÉ%ëÃè è HƒÂHÿÀH‹l$HH9èŒyýÿÿHÄàÉéyýÿÿ‰Eé)ýÿÿ +*0runtime.morestack_noctxt¢ 
runtime.eqstringþtype.[]stringÈ"runtime.growslice” runtime.memmoveŽ 2runtime.writebarriersliceº $runtime.panicsliceÈ $runtime.panicslice0À,"".autotmp_0152type.uint64"".autotmp_0151type.uint64"".autotmp_0150type.int"".autotmp_0149type.uintptr"".autotmp_0148type.int"".autotmp_0147¿type.[]string"".autotmp_0146type.uintptr"".autotmp_0144type.uint64"".autotmp_0143type.uint64"".autotmp_0142type.int"".autotmp_0141type.[]string"".autotmp_0138_type.[]string"".autotmp_0137ßtype.string"".autotmp_0136type.*string"".autotmp_0135¯type.int"".autotmp_0134Ÿtype.int"".autotmp_0133type.[]string"".autotmp_0132/type.[]string"".kÿtype.string"".i¿type.int "".keytype.string"".opts"type.*"".ListOpts&"À°¿À3¿À *Š*x[Î  Гf}ZTgclocals·2018557e3ee0abccf2865b16663e690bTgclocals·3548d93f69958cd35a7e42f3f32eff97¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ*"".(*ListOpts).GetMap€þeH‹ %HD$øH;AwèëåHìˆHH‰$HÇD$èH‹\$H‰\$0H‹œ$H‹+Hƒý„ÝH‹UH‹EH‹]H‰œ$€1ÉH‰D$xH‰D$ H‰T$pH‰ÐH‹l$ H9é‹H‰D$8Hƒø„‘H‹H‹hH‰L$(H‰T$PH‰l$XH‰T$@H‰T$`H‰l$HH‰l$hH\$ HH‰$H‹\$0H‰\$H\$`H‰\$H\$ H‰\$èH‹D$8H‹L$(HƒÀHÿÁH‹l$ H9éŒuÿÿÿH‹\$0H‰œ$˜HĈÉéhÿÿÿ‰Eéÿÿÿ +*0runtime.morestack_noctxtJ2type.map[string]struct {}nruntime.makemap¢2type.map[string]struct {}ð$runtime.mapassign1 "".autotmp_0169otype.string"".autotmp_0168Ÿtype.*string"".autotmp_0167Ïtype.int"".autotmp_0166¿type.int"".autotmp_0165Ïtype.struct {}"".autotmp_0164Otype.string"".autotmp_0163/type.[]string"".ktype.string "".ret¯2type.map[string]struct {} "".~r02type.map[string]struct {}"".opts"type.*"".ListOpts"À ¢"#pG6ÁITgclocals·2148c3737b2bb476685a1100a2e8343eTgclocals·895437610367c247af3cb64952bed446¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ*"".(*ListOpts).GetAll`^H‹\$H‹+HƒýtH‹]H‰\$H‹]H‰\$H‹]H‰\$ ÉEëß@ 
"".~r0type.[]string"".opts"type.*"".ListOpts00¸0Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ$"".(*ListOpts).GetÀªeH‹ %H;awèëêHƒìxH‹¼$H‹œ$€H‹+Hƒý„ÖH‹MH‹EH‹]H‰\$p1ÒH‰D$hH‰D$(H‰L$`H‹l$(H9ê’H‰L$8Hƒù„H‹1H‹AH‰T$0H‰t$PH‰D$XH9øuSH‰t$@H‰4$H‰D$HH‰D$H‹¬$ˆH‰l$H‰|$èH‹¼$H‹T$0H‹L$8¶\$ €ût Æ„$˜HƒÄxÃHƒÁHÿÂH‹l$(H9êŒnÿÿÿÆ„$˜HƒÄxÉéiÿÿÿ‰Eé"ÿÿÿ + 0runtime.morestack_noctxtî runtime.eqstring@ð"".autotmp_0174Otype.string"".autotmp_0173type.*string"".autotmp_0172Ÿtype.int"".autotmp_0171type.int"".autotmp_0170/type.[]string"".kotype.string "".~r10type.bool "".keytype.string"".opts"type.*"".ListOpts&ðÉïð!ïð  À"jK  +  ¶jTgclocals·9ff42bf311af152488d11f0f78c8d5ceTgclocals·4398bb51467914f29637b614067b995f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ$"".(*ListOpts).Len@$H‹\$H‹+H‹]H‰\$à  "".~r0type.int"".opts"type.*"".ListOpts  Ö Tgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ""".(*MapOpts).SetÀ¼eH‹ %HD$øH;AwèëåHìˆH‹”$H‹Œ$˜H‹„$ HÇ„$¨HÇ„$°H‹Z1íH9ët^H‰ $H‰D$H‹RH‹ÿÓH‹l$H‹T$H‹D$ H‹L$(H‰L$XHƒøH‰D$PtH‰„$¨H‰Œ$°HĈÃH‰l$@H‰éH‰T$HH‰ÐH‰Œ$˜H‰ $H‰„$ H‰D$HHl$H‰ïH‰ÞH¥H¥HÇD$ èH‹¬$H‹T$(H‹D$0H‹L$8H‰Œ$€HƒøuqHÇD$`HÇD$hHH‰$H‹mH‰l$H‰T$pHƒøH‰D$xv4H‰T$H\$`H‰\$èHÇ„$¨HÇ„$°HĈÃè HH‰$H‹mH‰l$Hƒøv/H‰T$H‰ÓH‰T$pHƒøH‰D$xvHƒÃH‰\$èë—è è  +*0runtime.morestack_noctxtÚ +®go.string."="èstrings.SplitNä,type.map[string]stringÆ$runtime.mapassign1$runtime.panicindex¢,type.map[string]string”$runtime.mapassign1¢$runtime.panicindex°$runtime.panicindexP"".autotmp_0178Otype.string"".vals/type.[]string "".errotype.error"".vtype.string 
"".~r10type.error"".valuetype.string"".opts type.*"".MapOpts("ˆÛYà<ìR + \J @mõgTgclocals·61fa3b017c2e156e481b3d912c20f49bTgclocals·729deb178891d0cb0d4c5b2058f91105¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ("".(*MapOpts).String€øeH‹ %H;awèëêHƒìpHÇ„$€HÇ„$ˆH‹D$xH‹(HD$HHÇHÇ@HD$HHƒø„˜HÇÂHÇÁH‰T$`H‰L$hH‰D$XH‰$HH‰D$8H‰D$H‰l$@H‰l$èHH,$H‰ïH‰ÞH¥H¥H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$ èH‹L$(H‹D$0H‰Œ$€H‰„$ˆHƒÄpÉéaÿÿÿ + 0runtime.morestack_noctxt‚,type.map[string]string´2runtime.writebarrierifaceÂgo.string."%v"¤fmt.Sprintf0à"".autotmp_0182/&type.[]interface {}"".autotmp_0179O(type.[1]interface {} "".~r0type.string"".opts type.*"".MapOptsàÚßà €Ž2Ι8/Tgclocals·6d340c3bdac448a6ef1256f331f68dd3Tgclocals·403a8d79fd24b295e8557f6970497aa3¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".NewMapOpts „eH‹ %H;awèëêHƒì H‹\$(1íH9ëu#HH‰$HÇD$èH‹\$H‰\$(HH‰$èH‹D$H‰D$H‰$Hƒ<$tKH‹\$(H‰\$èH‹\$H‰$Hƒ<$t#Hƒ$H‹\$0H‰\$èH‹\$H‰\$8HƒÄ É%ëÔ‰%ë¬ + 0runtime.morestack_noctxtR,type.map[string]stringvruntime.makemap˜type."".MapOptsª"runtime.newobjectò.runtime.writebarrierptrº.runtime.writebarrierptr0@"".autotmp_0190 type.*"".MapOpts "".~r2  type.*"".MapOpts"".validator0type."".ValidatorFctType"".values,type.map[string]string@•?@ Ж #X :>XTgclocals·bd51743682bd6c0f7b9f2e8e6dffed99Tgclocals·e1ae6533a9e39048ba0735a2264ce16a¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ""".ValidateAttachàÊeH‹ %HD$¸H;AwèëåHìÈHÇ„$àHÇ„$èHÇ„$ðHÇ„$øH‹œ$ÐH‰$H‹œ$ØH‰\$èL‹L$L‹D$L‰L$pL‰L$`L‰D$xL‰D$hHœ$˜Hƒû„~H-H‰ßH‰îèHÇÀHÇÁH‰Œ$1ÒH‰„$ˆH‰D$@H‰œ$€H‰ÙH‹l$@H9ê¯H‰L$HHƒù„H‹1H‹AH‰T$8H‰t$pH‰t$PH‰D$xH‰D$XI9ÀufL‰ $L‰D$H‰t$H‰D$èL‹L$`L‹D$hH‹T$8H‹L$H¶\$ 
€ût0L‰Œ$àL‰„$èHÇ„$ðHÇ„$øHÄÈÃHƒÁHÿÂH‹l$@H9êŒQÿÿÿHH,$H‰ïH‰ÞH¥H¥H\$HÇHÇCHÇCèH‹L$(H‹D$0H‹œ$ÐH‰œ$àH‹œ$ØH‰œ$èH‰Œ$ðH‰„$øHÄÈÉéáþÿÿ‰é{þÿÿ +*0runtime.morestack_noctxtØstrings.ToLowerÆ""".statictmp_0194ÜØ runtime.duffcopyð runtime.eqstringÄlgo.string."valid streams are STDIN, STDOUT and STDERR"¢fmt.Errorf`"".autotmp_0200type.string"".autotmp_0199ÿtype.*string"".autotmp_0198type.int"".autotmp_0197type.int"".autotmp_0196_type.[3]string"".autotmp_0192type.[]string"".autotmp_0191¯type.string "".strïtype.string"".sÏtype.string "".~r2@type.error "".~r1 type.string "".valtype.string("猰&¸R<Œ@0 +x kÌ™`Tgclocals·6d3fa487f5e45db9cb9199d2a5e0e216Tgclocals·5de38da5eeb0729bf417a80c29b78c42¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".ValidateLinkàÔeH‹ %H;awèëêHƒì@HÇD$XHÇD$`HÇD$hHÇD$pH‹\$HH‰$H‹\$PH‰\$èH‹l$HH‹T$PH‹D$0H‹L$8HƒøtH‰l$XH‰T$`H‰D$hH‰L$pHƒÄ@ÃH‰l$XH‰T$`HÇD$hHÇD$pHƒÄ@à + 0runtime.morestack_noctxt¤²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.ParseLink`€ "".~r2@type.error "".~r1 type.string "".valtype.string€n€ °Î>2' +Q_Tgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ""".ValidateDevice eH‹ %H;awèëêHƒì8HÇD$PHÇD$XHÇD$`HÇD$hH‹\$@H‰$H‹\$HH‰\$ÆD$èH‹l$H‹T$ H‹L$(H‹D$0H‰l$PH‰T$XH‰L$`H‰D$hHƒÄ8à + 0runtime.morestack_noctxt®"".validatePath`p "".~r2@type.error "".~r1 type.string "".valtype.stringpmo  â>R +V:Tgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".ValidatePath eH‹ %H;awèëêHƒì8HÇD$PHÇD$XHÇD$`HÇD$hH‹\$@H‰$H‹\$HH‰\$ÆD$èH‹l$H‹T$ H‹L$(H‹D$0H‰l$PH‰T$XH‰L$`H‰D$hHƒÄ8à + 
0runtime.morestack_noctxt®"".validatePath`p "".~r2@type.error "".~r1 type.string "".valtype.stringpmo  ò>R +V:Tgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".validatePath€@€@eH‹ %H„$øþÿÿH;AwèëâHìˆHÇ„$¨HÇ„$°HÇ„$¸HÇ„$ÀHÇ„$ˆHÇ„$HÇD$xHÇ„$€H‹œ$H‰$H‹´$˜H‰t$H5Hl$H‰ïH¥H¥èH‹”$H‹Œ$˜H‹\$ HƒûŽ;H‰”$èH‰Œ$ðHœ$øHÇHÇCHœ$øHƒû„ûHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0H‹œ$H‰œ$¨H‹œ$˜H‰œ$°H‰Œ$¸H‰„$ÀHĈÉéþþÿÿH‰$H‰L$HHl$H‰ïH‰ÞH¥H¥HÇD$ èH‹´$H‹¬$˜H‹L$(H‹D$0H‹T$8H‰”$H‰Œ$HƒøH‰„$†~ H‹YHƒû…;H‰´$èH‰¬$ðHœ$øHÇHÇCHœ$øHƒû„ûHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0H‹œ$H‰œ$¨H‹œ$˜H‰œ$°H‰Œ$¸H‰„$ÀHĈÉéþþÿÿHƒø…%H‰ËHƒø†H‹ H‹kH‰Œ$ˆH‰ $H‰¬$H‰l$èH‹t$H‹l$H‰´$H‰¬$˜H‹”$ˆH‰T$HH‹„$H‰D$PHƒøŽ Hƒø†¶€û/…ŠHÇÀ<…KH‹œ$ˆH‰œ$èH‹œ$H‰œ$ðHœ$øHÇHÇCHœ$øHƒû„ûHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0H‹œ$H‰œ$¨H‹œ$˜H‰œ$°H‰Œ$¸H‰„$ÀHĈÉéþþÿÿH‰´$¨H‰¬$°HÇ„$¸HÇ„$ÀHĈÃ1Àévþÿÿè è Hƒø…H‰ËHƒø†ýHƒÃH‹ H‹kH‰L$XH‰l$`HH‰$H‹H‰\$H‰Œ$ØH‰L$H‰¬$àH‰l$èH‹\$ ¶+@ˆl$GH‹L$XH‹D$`HH‰$H‹H‰\$H‰Œ$¸H‰L$H‰„$ÀH‰D$èH‹´$H‹”$H‹\$ ¶+@ˆl$F€|$G…1H‹L$XH‹D$`HH‰$H‹H‰\$H‰Œ$ÈH‰L$H‰„$ÐH‰D$èH‹´$H‹”$H‹\$ ¶+@€ý…Ï1À¶\$F€¼$ „÷<„ïHƒú†ÞH‹H‹FH‰óHƒú†ÃHƒÃH‹+H‰l$xH‹kH‰¬$€H‰Œ$ˆH‰ $H‰„$H‰D$èH‹\$H‰œ$èH‹\$H‰œ$ðH‹\$xH‰œ$¨H‹œ$€H‰œ$°H¼$81ÀèHœ$8Hƒû„.HÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹t$(H‹l$0H‰´$H‰¬$˜é÷úÿÿ‰éËþÿÿè è H‰óHƒú†¯HƒÃH‹ H‹kH‰Œ$ˆH‰ $H‰¬$H‰l$èH‹\$H‰œ$èH‹\$H‰œ$ðH¼$81ÀèHœ$8Hƒû„DHÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$H‹œ$Hƒ¼$†õH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ 
H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹t$(H‹l$0H‰´$H‰¬$˜é4ùÿÿè ‰éµþÿÿè HÇÀé'üÿÿè Hƒø…ùÿÿH‰ËHƒø†ÍHƒÃH‹+H‰¬$ˆH‹kH‰¬$H‰ËHƒø†žHƒÃ H‹+H‰l$xH‹kH‰¬$€H‰ËHƒø†rHƒÃ H‹ H‹kH‰L$hH‰l$pHH‰$H‹H‰\$H‰Œ$¸H‰L$H‰¬$ÀH‰l$èH‹\$ ¶+@ˆl$FH‹L$hH‹D$pHH‰$H‹H‰\$H‰Œ$ÈH‰L$H‰„$ÐH‰D$èH‹\$ ¶+@ˆl$E€|$F…¶H‹L$hH‹D$pHH‰$H‹H‰\$H‰Œ$ØH‰L$H‰„$àH‰D$èH‹\$ ¶+@€ý…d1À¶\$E€¼$ „P<…HH‹\$xH‰œ$èH‹œ$€H‰œ$ðHœ$øHÇHÇCHœ$øHƒû„ûHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0H‹œ$H‰œ$¨H‹œ$˜H‰œ$°H‰Œ$¸H‰„$ÀHĈÉéþþÿÿH‹œ$ˆH‰œ$èH‹œ$H‰œ$ðH‹\$xH‰œ$¨H‹œ$€H‰œ$°H¼$X1ÀèHœ$XHƒû„šHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$H‹œ$Hƒ¼$†KH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH‰$Hœ$èH‰\$èH‹L$H‹D$H‹œ$ HƒÃH‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$ HƒÃ H‰$H‰Œ$˜H‰L$H‰„$ H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹t$(H‹l$0H‰´$H‰¬$˜éPôÿÿè ‰é_þÿÿHÇÀé’üÿÿè è è è ² +00runtime.morestack_noctxt¼go.string.":"Þstrings.Countètype.string”runtime.convT2Eþ2runtime.writebarrierifaceŒLgo.string."bad format for volumes: %s"€fmt.Errorf²go.string.":"ìstrings.SplitNÖ type.string‚ runtime.convT2Eì 2runtime.writebarrierifaceú Lgo.string."bad format for volumes: %s"î fmt.Errorføpath.Cleanªtype.stringÖruntime.convT2EÀ2runtime.writebarrierifaceÎLgo.string."%s is not an absolute path"Âfmt.ErrorfÌ$runtime.panicindexÚ$runtime.panicindexÄ(type.map[string]boolÚ¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.roModes¢4runtime.mapaccess1_faststrÞ(type.map[string]boolô¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.rwModes¼4runtime.mapaccess1_faststr®(type.map[string]boolĤgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.rwModesŒ4runtime.mapaccess1_faststr²path.Clean¾ð runtime.duffzero¼ type.stringè runtime.convT2EÒ!2runtime.writebarrierifaceà!type.stringŒ"runtime.convT2Eþ"2runtime.writebarrierifaceŒ#"go.string."%s:%s"€$fmt.SprintfÖ$$runtime.panicindexä$$runtime.panicindexÔ%path.Clean¦&ð 
runtime.duffzero¤'type.stringî'runtime.convT2EØ(2runtime.writebarrierifaceæ(type.string’)runtime.convT2E„*2runtime.writebarrieriface’*"go.string."%s:%s"†+fmt.SprintfÎ+$runtime.panicindexê+$runtime.panicindex,$runtime.panicindex”.(type.map[string]boolª.¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.roModesò.4runtime.mapaccess1_faststr®/(type.map[string]boolÄ/¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.rwModesŒ04runtime.mapaccess1_faststrÞ0(type.map[string]boolô0¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.rwModes¼14runtime.mapaccess1_faststr€4type.string¬4runtime.convT2E–52runtime.writebarrieriface¤5Rgo.string."bad mount mode specified : %s"˜6fmt.ErrorfÂ8è runtime.duffzeroÀ9type.stringŠ:runtime.convT2Eô:2runtime.writebarrieriface‚;type.string®;runtime.convT2E <2runtime.writebarrieriface®<type.stringÚ<runtime.convT2EÌ=2runtime.writebarrierifaceÚ=(go.string."%s:%s:%s"Î>fmt.Sprintf–?$runtime.panicindexÊ?$runtime.panicindexØ?$runtime.panicindexæ?$runtime.panicindexô?$runtime.panicindexp”"".autotmp_0278"type.interface {}"".autotmp_0277*type.*[1]interface {}"".autotmp_0276&type.[]interface {}"".autotmp_0275type.bool"".autotmp_0274"type.interface {}"".autotmp_0273"type.interface {}"".autotmp_0272"type.interface {}"".autotmp_0270&type.[]interface {}"".autotmp_0269"type.interface {}"".autotmp_0268*type.*[1]interface {}"".autotmp_0267&type.[]interface {}"".autotmp_0266type.bool"".autotmp_0265type.bool"".autotmp_0264"type.interface {}"".autotmp_0263"type.interface {}"".autotmp_0262*type.*[2]interface {}"".autotmp_0261&type.[]interface {}"".autotmp_0260"type.interface {}"".autotmp_0259"type.interface {}"".autotmp_0257&type.[]interface {}"".autotmp_0256type.bool"".autotmp_0255…type.bool"".autotmp_0254type.int"".autotmp_0253"type.interface {}"".autotmp_0252*type.*[1]interface {}"".autotmp_0251&type.[]interface {}"".autotmp_0250ß"type.interface {}"".autotmp_0248Ï&type.[]interface 
{}"".autotmp_0247type.error"".autotmp_0246type.string"".autotmp_0245(type.[1]interface {}"".autotmp_0244type.int"".autotmp_0243type.string"".autotmp_0242type.string"".autotmp_0241type.string"".autotmp_0240_(type.[3]interface {}"".autotmp_0239type.error"".autotmp_0238type.string"".autotmp_0237(type.[1]interface {}"".autotmp_0236type.bool"".autotmp_0235type.string"".autotmp_0234type.bool"".autotmp_0233type.string"".autotmp_0232type.bool"".autotmp_0231type.string"".autotmp_0230type.string"".autotmp_0229type.string"".autotmp_0228(type.[2]interface {}"".autotmp_0227type.string"".autotmp_0226¿type.string"".autotmp_0225type.string"".autotmp_0224Ÿ(type.[2]interface {}"".autotmp_0223ƒtype.bool"".autotmp_0222Ÿtype.string"".autotmp_0220ÿtype.string"".autotmp_0219type.bool"".autotmp_0218type.string"".autotmp_0217ßtype.string"".autotmp_0216type.int"".autotmp_0215type.error"".autotmp_0214type.string"".autotmp_0213(type.[1]interface {}"".autotmp_0210¿type.string"".autotmp_0209Ÿ(type.[1]interface {}path.path·2ÿtype.string¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.mode·3¿type.string¤github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.mode·3ßtype.string"".splitedÿtype.[]string"".modeŸtype.string "".containerPathÿtype.string "".~r3Ptype.error "".~r20type.string("".validateMountMode type.bool "".valtype.stringR%à¶é6ó®€ ÂúUP»T(» +8&IË0' +Ì%Ÿ‘  +(%¬Èì%&!œ®›vvËv…ïv…kµ“›Ì+?Ì$Ä¥¸vù¢$Tgclocals·17574d085f5f5b0763ac1aaf01ce4b67Tgclocals·468082ab545c1bae5a803029688d85e2¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".ValidateEnv€îeH‹ %HD$˜H;AwèëåHìèHÇ„$HÇ„$HÇ„$HÇ„$H‹œ$ðH‰$H‹´$øH‰t$H5Hl$H‰ïH¥H¥èH‹T$ H‹L$(H‹D$0H‰„$ÀHƒù~@H‹œ$ðH‰œ$H‹œ$øH‰œ$HÇ„$HÇ„$HÄèÃH‹H‰$H‰”$°HƒùH‰Œ$¸†«Hl$H‰ïH‰ÖH¥H¥èH‹”$ðH‹Œ$ø¶\$€û…“H‰T$xH‰Œ$€Hœ$ˆHÇHÇCHœ$ˆHƒû„VHÇÂHÇÁH‰œ$˜H‰”$ H‰Œ$¨HH‰$H\$xH‰\$èH‹L$H‹D$H‹œ$˜H‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$˜H‰\$H‹œ$ 
H‰\$H‹œ$¨H‰\$ èH‹L$(H‹D$0H\$XHÇHÇCH‰L$hH‰L$XH‰D$pH‰D$`H‹œ$ðH‰œ$H‹œ$øH‰œ$HH‰$HH‰\$HH‰\$H\$XH‰\$èH‹\$ H‰œ$H‹\$(H‰œ$HÄèÉé£þÿÿH‰$H‰L$èH‹”$ðH‹Œ$ø¶\$€ûu0H‰”$H‰Œ$HÇ„$HÇ„$HÄèÃH‰T$xH‰Œ$€H‰$H‰L$èH‹\$H‰\$HH‹\$H‰\$PH¼$È1ÀèHœ$ÈHƒû„7HÇÁHÇÂH‰œ$˜H‰Œ$ H‰”$¨HH‰$H\$xH‰\$èH‹L$H‹D$H‹œ$˜H‰$H‰L$8H‰L$H‰D$@H‰D$èHH‰$H\$HH‰\$èH‹L$H‹D$H‹œ$˜HƒÃH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$ èH‹L$(H‹D$0H‰Œ$H‰„$HÇ„$HÇ„$HÄèÉéÂþÿÿè 6 +*0runtime.morestack_noctxtÜgo.string."="þstrings.SplitÆ8"".EnvironmentVariableRegexpª8regexp.(*Regexp).MatchString¬type.stringÒruntime.convT2E°2runtime.writebarrieriface¾zgo.string."variable '%s' is not a valid environment variable"²fmt.Sprintfä 2type."".ErrBadEnvVariableú type.error’ +Dgo.itab."".ErrBadEnvVariable.errorº +runtime.convT2I¨ "".doesEnvExistò os.Getenv¸ ð runtime.duffzero¶type.stringÜruntime.convT2Eº2runtime.writebarrierifaceÈtype.stringîruntime.convT2EÔ2runtime.writebarrierifaceâ"go.string."%s=%s"Öfmt.Sprintfâ$runtime.panicindex`Ð$"".autotmp_0318"type.interface {}"".autotmp_0317"type.interface {}"".autotmp_0315&type.[]interface {}"".autotmp_0314ß"type.interface {}"".autotmp_0312Ÿ&type.[]interface {}"".autotmp_0311type.string"".autotmp_0310¿type.string"".autotmp_0309type.string"".autotmp_0308?(type.[2]interface {}"".autotmp_0307type.bool"".autotmp_0306Ÿ2type."".ErrBadEnvVariable"".autotmp_0305ÿtype.string"".autotmp_0304ßtype.string"".autotmp_0303¿(type.[1]interface {} "".arrotype.[]string "".~r2@type.error "".~r1 type.string "".valtype.stringB"нÏÐâÏÐ^ÏІÏÐÀ 0ÐRH@W“(0Ž 0~ªp„7eu½F Tgclocals·b343c92068d41d468064df311efb05d1Tgclocals·e77b4954b1067dbe825673e24b2082a5¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ("".ValidateIPAddress€ îeH‹ %HD$ØH;AwèëåHì¨HÇ„$ÀHÇ„$ÈHÇ„$ÐHÇ„$ØH‹œ$°H‰$H‹œ$¸H‰\$èH‹L$H‹D$H‰L$hH‰ $H‰D$pH‰D$èH‹D$H‹T$H‹L$ H‰”$€H‰Œ$ˆHƒøH‰D$xtMH‰$H‰T$H‰L$èH‹L$H‹D$ 
H‰Œ$ÀH‰„$ÈHÇ„$ÐHÇ„$ØHĨÃH‹œ$°H‰\$HH‹œ$¸H‰\$PH\$XHÇHÇCH\$XHƒû„êHÇÂHÇÁH‰œ$H‰”$˜H‰Œ$ HH‰$H\$HH‰\$èH‹L$H‹D$H‹œ$H‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$˜H‰\$H‹œ$ H‰\$ èH‹L$(H‹D$0HÇ„$ÀHÇ„$ÈH‰Œ$ÐH‰„$ØHĨÉéÿÿÿ +*0runtime.morestack_noctxtØ"strings.TrimSpaceœnet.ParseIP–net.IP.StringÞtype.string„runtime.convT2Eâ2runtime.writebarrierifaceðFgo.string."%s is not an ip address"äfmt.Errorf`Ð"".autotmp_0333ß"type.interface {}"".autotmp_0331/&type.[]interface {}"".autotmp_0329¿type.string"".autotmp_0328Ÿ(type.[1]interface {}"".autotmp_0327type.string"".autotmp_0325type.string +"".ip_type.net.IP "".~r2@type.error "".~r1 type.string "".valtype.string("ÐæÏЦÏÐÀîR_ M·k–pOTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216Tgclocals·0d8996d96a62b65d7a617f192875fe09¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ*"".ValidateMACAddressÀ¨eH‹ %H;awèëêHƒìHHÇD$`HÇD$hHÇD$pHÇD$xH‹\$PH‰$H‹\$XH‰\$èH‹L$H‹D$H‰L$8H‰ $H‰D$@H‰D$èH‹D$(H‹L$0Hƒøt!HÇD$`HÇD$hH‰D$pH‰L$xHƒÄHÃH‹\$PH‰\$`H‹\$XH‰\$hHÇD$pHÇD$xHƒÄHà + 0runtime.morestack_noctxt¤"strings.TrimSpaceènet.ParseMAC`"".autotmp_0337type.string "".~r2@type.error "".~r1 type.string "".valtype.string Ž* à€>D!7 QTgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ("".ValidateDNSSearchàÂeH‹ %H;awèëêHƒì0HÇD$HHÇD$PHÇD$XHÇD$`H‹\$8H‰$H‹t$@H‰t$H5Hl$H‰ïH¥H¥èH‹L$ H‹D$(HƒøucH‰L$8H‰ $H‰D$@H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹L$8H‹D$@¶\$ €ût!H‰L$HH‰D$PHÇD$XHÇD$`HƒÄ0ÃH‰L$8H‰ $H‰D$@H‰D$èH‹l$H‹T$H‹L$ H‹D$(H‰l$HH‰T$PH‰L$XH‰D$`HƒÄ0à + 0runtime.morestack_noctxt¨go.string." 
"Êstrings.Trimžgo.string."."Æ runtime.eqstringà""".validateDomain``"".autotmp_0339type.string "".~r2@type.error "".~r1 type.string "".valtype.string`Á_`D_°”>}!T dÌTgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ""".validateDomain  eH‹ %HD$èH;AwèëåHì˜HÇ„$°HÇ„$¸HÇ„$ÀHÇ„$ÈH‹H‰$H‹œ$ H‰\$H‹œ$¨H‰\$èH‹”$ H‹Œ$¨H‹\$H‹\$ Hƒû…H‰T$HH‰L$PH\$XHÇHÇCH\$XHƒû„êHÇÂHÇÁH‰œ$€H‰”$ˆH‰Œ$HH‰$H\$HH‰\$èH‹L$H‹D$H‹œ$€H‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ èH‹L$(H‹D$0HÇ„$°HÇ„$¸H‰Œ$ÀH‰„$ÈHĘÉéÿÿÿH‰$H‰L$èH\$Hl$H‰ïH‰ÞH¥H¥H¥H‹H‰$èH‹L$ H‹D$(H‹T$0H‰L$hH‰T$xH‰D$pHƒøŽH‰ËHƒø†¢HƒÃH‹kHýÿ}cH‰ËHƒøvSHƒÃH,$H‰ïH‰ÞH¥H¥H¥èH‹\$H‰œ$°H‹\$ H‰œ$¸HÇ„$ÀHÇ„$ÈHĘÃè H‹œ$ H‰\$HH‹œ$¨H‰\$PH\$XHÇHÇCH\$XHƒû„êHÇÂHÇÁH‰œ$€H‰”$ˆH‰Œ$HH‰$H\$HH‰\$èH‹L$H‹D$H‹œ$€H‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ èH‹L$(H‹D$0HÇ„$°HÇ„$¸H‰Œ$ÀH‰„$ÈHĘÉéÿÿÿè ( +*0runtime.morestack_noctxtª"".alphaRegexpð6regexp.(*Regexp).FindStringìtype.string’runtime.convT2Eð2runtime.writebarrierifaceþHgo.string."%s is not a valid domain"òfmt.Errorf2runtime.stringtoslicebyteÊ"".domainRegexpÜ:regexp.(*Regexp).FindSubmatch¬ 2runtime.slicebytetostringª +$runtime.panicindex‚ type.string¨ runtime.convT2E† 2runtime.writebarrieriface” Hgo.string."%s is not a valid domain"ˆfmt.Errorf”$runtime.panicindex`° "".autotmp_0356"type.interface {}"".autotmp_0355*type.*[1]interface {}"".autotmp_0354&type.[]interface {}"".autotmp_0353¿"type.interface {}"".autotmp_0351/&type.[]interface {}"".autotmp_0350type.error"".autotmp_0349type.string"".autotmp_0348(type.[1]interface {}"".autotmp_0347type.int"".autotmp_0346type.int"".autotmp_0343Ÿtype.string"".autotmp_0342(type.[1]interface {} +"".ns_type.[][]uint8 "".~r2@type.error "".~r1 type.string 
"".valtype.string6"°”¯°Ü¯°­¯°Ð(¢RNžM-c®&w‘pOÌpFTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216Tgclocals·e11ef9888c395c84aa28a6aa44bae264¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ("".ValidateExtraHost ˆeH‹ %HD$ÐH;AwèëåHì°HÇ„$ÈHÇ„$ÐHÇ„$ØHÇ„$àH‹œ$¸H‰$H‹´$ÀH‰t$H5Hl$H‰ïH¥H¥HÇD$ èH‹T$(H‹D$0H‹L$8H‰”$˜H‰Œ$¨H‰„$ Hƒø…ÒHƒø†öH‹jHƒý„ºH‰ÓHƒø†¦HƒÃH,$H‰ïH‰ÞH¥H¥èH‹D$ H‹\$(H‰\$HHƒøH‰D$@„1H\$pHÇHÇCH\$pHƒû„HÇÂHÇÁH‰œ$€H‰”$ˆH‰Œ$HH‰$H‹œ$˜Hƒ¼$ †¸HƒÃH‰\$èH‹L$H‹D$H‹œ$€H‰$H‰L$PH‰L$H‰D$XH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ èH‹L$(H‹D$0HÇ„$ÈHÇ„$ÐH‰Œ$ØH‰„$àHÄ°Ãè ‰éòþÿÿH‹œ$¸H‰œ$ÈH‹œ$ÀH‰œ$ÐHÇ„$ØHÇ„$àHÄ°Ãè H‹œ$¸H‰\$`H‹œ$ÀH‰\$hH\$pHÇHÇCH\$pHƒû„êHÇÂHÇÁH‰œ$€H‰”$ˆH‰Œ$HH‰$H\$`H‰\$èH‹L$H‹D$H‹œ$€H‰$H‰L$PH‰L$H‰D$XH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$€H‰\$H‹œ$ˆH‰\$H‹œ$H‰\$ èH‹L$(H‹D$0HÇ„$ÈHÇ„$ÐH‰Œ$ØH‰„$àHİÉéÿÿÿè $ +*0runtime.morestack_noctxtÜgo.string.":"strings.SplitNê("".ValidateIPAddressÆtype.string˜runtime.convT2Eö2runtime.writebarrieriface„\go.string."invalid IP address in add-host: %q"øfmt.Errorfö$runtime.panicindex’ +$runtime.panicindexê type.string runtime.convT2Eî 2runtime.writebarrierifaceü Ngo.string."bad format for add-host: %q"ð fmt.Errorfü$runtime.panicindex`à"".autotmp_0376"type.interface {}"".autotmp_0375*type.*[1]interface {}"".autotmp_0374&type.[]interface {}"".autotmp_0373¿"type.interface {}"".autotmp_0371_&type.[]interface {}"".autotmp_0370type.error"".autotmp_0369(type.[1]interface {}"".autotmp_0367Ÿtype.string"".autotmp_0366(type.[1]interface {} "".errßtype.error "".arr/type.[]string "".~r2@type.error "".~r1 type.string "".valtype.string4"à—ßàMßà­ßàÐ,¼RY*B±@®,‡m—ppF Tgclocals·4a0bb136639836c86d1f426111a5a477Tgclocals·0a70b462877543c2d66d492afda34c99¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ "".ValidateLabelÀ¬eH‹ %H;awèëêHì€HÇ„$˜HÇ„$ 
HÇ„$¨HÇ„$°H‹œ$ˆH‰$H‹´$H‰t$H5Hl$H‰ïH¥H¥èH‹”$ˆH‹Œ$H‹\$ Hƒû H‰T$HH‰L$PH\$XHÇHÇCH\$XHƒû„ÕHÇÂHÇÁH‰\$hH‰T$pH‰L$xHH‰$H\$HH‰\$èH‹L$H‹D$H‹\$hH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$hH‰\$H‹\$pH‰\$H‹\$xH‰\$ èH‹L$(H‹D$0HÇ„$˜HÇ„$ H‰Œ$¨H‰„$°HĀÉé$ÿÿÿH‰”$˜H‰Œ$ HÇ„$¨HÇ„$°HĀà + 0runtime.morestack_noctxtÒgo.string."="ôstrings.CountÔtype.stringúruntime.convT2EÒ2runtime.writebarrierifaceàHgo.string."bad attribute format: %s"Âfmt.Errorf`€"".autotmp_0389"type.interface {}"".autotmp_0387/&type.[]interface {}"".autotmp_0385otype.string"".autotmp_0384O(type.[1]interface {} "".~r2@type.error "".~r1 type.string "".valtype.string €ÿ€6ÿ àØMP‰:yƒd€Tgclocals·6d3fa487f5e45db9cb9199d2a5e0e216Tgclocals·1eb9d8ec9969f1d922533aa863dff6f6¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".ValidateHostàÚeH‹ %H;awèëêHƒìPHÇD$hHÇD$pHÇD$xHÇ„$€H‹H‰$H‹H‰\$H‹H‰\$H‹H‰\$H‹\$XH‰\$ H‹\$`H‰\$(èH‹l$0H‹T$8H‹D$@H‹L$HHƒøt&H‹\$XH‰\$hH‹\$`H‰\$pH‰D$xH‰Œ$€HƒÄPÃH‰l$hH‰T$pHÇD$xHÇ„$€HƒÄPà + 0runtime.morestack_noctxtˆ$"".DefaultHTTPHostž$"".DefaultHTTPHost¶("".DefaultUnixSocketÎ("".DefaultUnixSocketŠ²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.ParseHost`  "".~r2@type.error "".~r1 type.string "".valtype.string  ®Ÿ #ŸðèA\&' „lTgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".doesEnvExistÀªeH‹ %HD$ÀH;AwèëåHìÀèH‹$H‹D$H‹L$H‰”$¨H‰„$°H‰Œ$¸H‰Œ$ 1ÉH‰„$˜H‰D$@H‰”$H‰ÐH‹l$@H9éúH‰D$PHƒø„H‹H‹xH‰L$HH‰T$hH‰|$pH‰T$XH‰$H‰|$`H‰|$HH|$H‰ÞH¥H¥HÇD$ èH‹´$ÐH‹T$(H‹L$0H‹D$8H‰„$ˆH‰T$xHƒùH‰Œ$€vH‹ +H‹BH9ðuDH‰L$hH‰ $H‰D$pH‰D$H‹¬$ÈH‰l$H‰t$è¶\$ €ûtÆ„$ØHÄÀÃH‹D$PH‹L$HHƒÀHÿÁH‹l$@H9éŒÿÿÿÆ„$ØHÄÀÃè ‰é÷þÿÿ +*0runtime.morestack_noctxtFos.Environðgo.string."="¤strings.SplitNô 
runtime.eqstring$runtime.panicindex0€"".autotmp_0400type.string"".autotmp_0399¯type.string"".autotmp_0398ßtype.*string"".autotmp_0397ÿtype.int"".autotmp_0396ïtype.int"".autotmp_0394_type.[]string"".autotmp_0393/type.[]string"".partstype.[]string"".entryÏtype.string "".~r1 type.bool"".nametype.string&"€µÿ€.ÿ€ *ø"€SS "¯¶Tgclocals·d7e8a62d22b1cde6d92b17a55c33fe8fTgclocals·e699116c0f498e16ce59c4e4fa0a75a3¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ"".NewUlimitOptÀ®eH‹ %H;awèëêHƒì(H‹\$01íH9ëu\HH‰$èH‹\$H‰\$ HH‰$HÇD$èH‹\$H‰\$H‹\$ H‰$Hƒ<$t[èH‹\$ H‰\$0HH‰$èH‹D$H‰D$H‰$Hƒ<$tH‹\$0H‰\$èH‹\$H‰\$8HƒÄ(É%ëÙ‰%ëœ + 0runtime.morestack_noctxtRÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitd"runtime.newobject†Ìtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitªruntime.makemapè.runtime.writebarrierptrŠ"type."".UlimitOptœ"runtime.newobjectä.runtime.writebarrierptr P"".autotmp_0402$type.*"".UlimitOpt"".autotmp_0401Îtype.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit "".~r1$type.*"".UlimitOpt "".refÎtype.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitPªOPà \L1#9$/Tgclocals·31b90725c9a885e731df361f51db8f0dTgclocals·f6dcde45bff02c6c4b088b594fd52a4cº/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.goþ&"".(*UlimitOpt).Set žeH‹ %H;awèëêHƒì@HÇD$`HÇD$hH‹\$PH‰$H‹\$XH‰\$èH‹D$H‹L$H‹T$ H‰T$8HƒùH‰L$0tH‰L$`H‰T$hHƒÄ@ÃH‰D$(HH‰$H‹\$HH‹+H‹]H‰\$H‰D$Hƒ|$t&H\$(H‰\$èHÇD$`HÇD$hHƒÄ@É%ëÑ + + 
0runtime.morestack_noctxt€¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.ParseôÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÖ$runtime.mapassign1P€ +"".autotmp_0403/¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit "".errtype.error "".~r10type.error "".valtype.string"".o$type.*"".UlimitOpt €W€S€ +Ð*,, = +?k&Tgclocals·61fa3b017c2e156e481b3d912c20f49bTgclocals·61e2515c69061b8fed0e66ece719f936º/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.goþ,"".(*UlimitOpt).StringÀ ¶ eH‹ %H„$`ÿÿÿH;AwèëâHì HÇ„$0HÇ„$8HÇD$pHÇD$xHÇ„$€H‹œ$(H‹+H‹MH¼$Ð1ÀèHH‰$H‰L$Hœ$ÐH‰\$èH‹œ$Ð1íH9ë„=H‹œ$ØH‹+H‹œ$ÐHƒû„IH‰,$èH‹\$H‰\$`H‹\$H‰\$hH‹T$pH‹L$xH‹œ$€H‰ØH)ËHƒû}OHH‰$H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$¨H‰„$°H‰ÓH‰”$ H‰ÍHkíHëH‰$H‹\$`H‰\$H‹\$hH‰\$èH‹”$ H‹Œ$¨H‹„$°H‰T$pH‰L$xH‰„$€Hœ$ÐH‰$èH‹œ$Ð1íH9ë…ÃþÿÿH‹\$pH‰œ$¸H‹\$xH‰œ$ÀH‹œ$€H‰œ$ÈH\$PHÇHÇCH\$PHƒû„ÕHÇÂHÇÁH‰œ$ˆH‰”$H‰Œ$˜HH‰$Hœ$¸H‰\$èH‹L$H‹D$H‹œ$ˆH‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹L$(H‹D$0H‰Œ$0H‰„$8HÄ Éé$ÿÿÿ‰é°ýÿÿ +00runtime.morestack_noctxtêØ runtime.duffzeroøÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit®&runtime.mapiterinit ¾github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.(*Ulimit).String’type.[]string„"runtime.growslice²4runtime.writebarrierstring¨&runtime.mapiternext type.[]stringî runtime.convT2EÌ +2runtime.writebarrierifaceÚ +go.string."%v"Î fmt.Sprintf0À"".autotmp_0419¿"type.interface {}"".autotmp_0417¯&type.[]interface {}"".autotmp_0412ÿtype.[]string"".autotmp_0410type.string"".autotmp_0409Ïtype.[]string"".autotmp_0408Ÿ(type.[1]interface 
{}"".autotmp_0406ÿtype.string"".autotmp_0405ŸÖtype.map.iter[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit "".outßtype.[]string "".~r0type.string"".o$type.*"".UlimitOpt%Àç¿À &@=pü$© +$–«W;£p:Tgclocals·893bc98fd3630511d02cf4cf8c0f1f93Tgclocals·e10b17f36388f52b2772c8b1b26a55b3º/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.goþ."".(*UlimitOpt).GetList ’eH‹ %HD$¸H;AwèëåHìÈHÇ„$ØHÇ„$àHÇ„$èHÇD$HHÇD$PHÇD$XH‹œ$ÐH‹+H‹MH|$x1ÀèHH‰$H‰L$H\$xH‰\$èH‹\$x1íH9ë„èH‹œ$€H‹+H‹\$xHƒû„ýH‰l$@H‹T$HH‹L$PH‹\$XH‰ØH)ËHƒû}FHH‰$H‰T$`H‰T$H‰L$hH‰L$H‰D$pH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$hH‰D$pH‰T$`HÊH‰$H‹\$@H‰\$èH‹T$`H‹L$hH‹D$pH‰T$HH‰L$PH‰D$XH\$xH‰$èH‹\$x1íH9ë…ÿÿÿH‹\$HH‰œ$ØH‹\$PH‰œ$àH‹\$XH‰œ$èHÄÈÉéüþÿÿ +*0runtime.morestack_noctxtðØ runtime.duffzeroþÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit®&runtime.mapiterinitкtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit°"runtime.growslice¦.runtime.writebarrierptrþ&runtime.mapiternext@ "".autotmp_0430Ϻtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit"".autotmp_0427ŸÖtype.map.iter[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit"".v¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit"".ulimitsÿºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit "".~r0ºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit"".o$type.*"".UlimitOpt"ŸÐ RFi«/–;,RTgclocals·afd56e89fe406cd8321967b6f2c293efTgclocals·26d269e519e13fce3b5a9726f3ff5d6dº/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.goþ"".init  eH‹ 
%H;awèëêHƒìp¶€ût¶€ûuHƒÄpÃè ÆèèèèèèèèèèHH,$H‰ïH‰ÞH¥H¥èH‹D$HH‰$H‰D$èHH‰$èH‹\$Hƒû„?HÇÂHÇÁH‰\$XH‰T$`H‰L$hHH‰$HH‰\$èH‹L$H‹D$H‹\$XH‰$H‰L$8H‰L$H‰D$@H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$XH‰\$H‹\$`H‰\$H‹\$hH‰\$ èH‹L$(H‹D$0HH‰$H‰L$HH‰L$H‰D$PH‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹D$HH‰$H‰D$èHH,$H‰ïH‰ÞH¥H¥èH‹D$HH‰$H‰D$èÆHƒÄpÉéºþÿÿN + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·Œ¦github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.init–žgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume.init ¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.initªpath.init´net.init¾strings.initÈregexp.initÒos.initÜfmt.initæbufio.initô`go.string."^[[:alpha:]_][[:alpha:][:digit:]_]*$"š$regexp.MustCompile²8"".EnvironmentVariableRegexpÎ.runtime.writebarrierptrÜ(type.[1]interface {}î"runtime.newobjectÔtype.stringê("".DefaultUnixSocketþruntime.convT2EÖ2runtime.writebarrierifaceä*go.string."unix://%s"Æfmt.Sprintfè"".DefaultHost¢4runtime.writebarrierstring°(go.string."[a-zA-Z]"Ö$regexp.MustCompileî"".alphaRegexpŠ.runtime.writebarrierptr˜""..gostring.1¾$regexp.MustCompileÖ"".domainRegexpò.runtime.writebarrierptrþ"".initdone·à +"".autotmp_0444o"type.interface {}"".autotmp_0442/&type.[]interface {}"".autotmp_0441&type.*regexp.Regexp"".autotmp_0440&type.*regexp.Regexp"".autotmp_0439Otype.string&àßàÑßàw4êh  `w?4ê44< 
O7Çd®Tgclocals·0115f8d53b75c1696444f08ad03251d9Tgclocals·da64d0820e77fbc27f563f985efdc21fº/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goÂ/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.goþ(type..hash.[0]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_0450type.int"".autotmp_0449type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[0]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ$type..eq.[0]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_0454?type.string"".autotmp_0453type.string"".autotmp_0452_type.int"".autotmp_0451Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[0]string"".ptype.*[0]string&°´¯° ¯°ðð  
PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ:"".(*ErrBadEnvVariable).Error œeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HH‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt)H,$H‰ïH¥H¥èH‹L$H‹D$H‰L$@H‰D$HHƒÄ0ÉëÓ + 0runtime.morestack_noctxt¢ go.string."opts"Ì:go.string."ErrBadEnvVariable"ø"go.string."Error" "runtime.panicwrapÚ4"".ErrBadEnvVariable.Error0` "".~r0type.string""..this4type.*"".ErrBadEnvVariable`¯_`ÐÐ ATgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fþ4type..hash.[1]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0458type.int"".autotmp_0457type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[1]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ0type..eq.[1]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0462?"type.interface {}"".autotmp_0461"type.interface {}"".autotmp_0460_type.int"".autotmp_0459Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[1]interface {}"".p*type.*[1]interface {}&°´¯° ¯°ðð  
PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ4type..hash.[2]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0465type.int"".autotmp_0464type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[2]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ0type..eq.[2]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0469?"type.interface {}"".autotmp_0468"type.interface {}"".autotmp_0467_type.int"".autotmp_0466Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[2]interface {}"".p*type.*[2]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ."".(*IpOpt).DefaultMask žeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹|$8H‹7Hƒþt2H<$H¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÊ + 0runtime.morestack_noctxtÈ$net.IP.DefaultMask@` "".~r1type.net.IPMask""..thistype.*"".IpOpt`p_` +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ""".(*IpOpt).Equal€úeH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹|$@H‹7Hƒþt;H<$H¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8ÉëÁ + 
0runtime.morestack_noctxtÎnet.IP.EqualPp "".~r2@type.boolnet.x·3type.net.IP""..thistype.*"".IpOptp^op€€ +fTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþ6"".(*IpOpt).IsGlobalUnicastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’,net.IP.IsGlobalUnicast @ "".~r1type.bool""..thistype.*"".IpOpt@@?@` +` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþJ"".(*IpOpt).IsInterfaceLocalMulticastààeH‹ %H;awèëêHƒì(H‹Y H…Ût H|$0H9;uH‰#HH‰$èH‹D$H‰D$ H‰$H‹|$0H‹7Hƒþ„‰H|$H¥H¥H¥èH‹T$ H‹jHƒýu_H‹ +H‹BL‹BHƒøvR¶€ûÿuFHÇÀ<t0H‹H‹BL‹BHƒøv#HÿöHƒã€ûuHÇÀˆD$8HƒÄ(Ã1Àëóè 1Àë½è ‰épÿÿÿ + 0runtime.morestack_noctxtftype.net.IPx"runtime.newobjectØ2runtime.writebarrierslice°$runtime.panicindexÆ$runtime.panicindex P"".&net.ip·2type.*net.IP "".~r1type.bool""..thistype.*"".IpOptP¸OPð ð;0lTgclocals·2148c3737b2bb476685a1100a2e8343eTgclocals·e1ae6533a9e39048ba0735a2264ce16aþ@"".(*IpOpt).IsLinkLocalMulticastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’6net.IP.IsLinkLocalMulticast @ "".~r1type.bool""..thistype.*"".IpOpt@@?@`` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ<"".(*IpOpt).IsLinkLocalUnicastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’2net.IP.IsLinkLocalUnicast @ "".~r1type.bool""..thistype.*"".IpOpt@@?@`` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ,"".(*IpOpt).IsLoopbackÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’"net.IP.IsLoopback @ "".~r1type.bool""..thistype.*"".IpOpt@@?@`` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ."".(*IpOpt).IsMulticastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’$net.IP.IsMulticast @ 
"".~r1type.bool""..thistype.*"".IpOpt@@?@`` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ2"".(*IpOpt).IsUnspecifiedÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹|$(H‹7HƒþtH<$H¥H¥H¥è¶\$ˆ\$0HƒÄ Éëß + 0runtime.morestack_noctxt’(net.IP.IsUnspecified @ "".~r1type.bool""..thistype.*"".IpOpt@@?@`` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ."".(*IpOpt).MarshalText€êeH‹ %H;awèëêHƒì@H‹Y H…Ût H|$HH9;uH‰#HÇD$PHÇD$XHÇD$`HÇD$hHÇD$pH‹|$HH‹7HƒþtFH<$H¥H¥H¥èH‹t$H‹l$ H‹T$(H‹L$0H‹D$8H‰t$PH‰l$XH‰T$`H‰L$hH‰D$pHƒÄ@Éë¶ + 0runtime.morestack_noctxtì$net.IP.MarshalText`€ "".~r2@type.error "".~r1type.[]uint8""..thistype.*"".IpOpt€–€ÀÀ +uKTgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9fþ "".(*IpOpt).Mask€æeH‹ %H;awèëêHƒìHH‹Y H…Ût H|$PH9;uH‰#HÇD$pHÇD$xHÇ„$€H‹|$PH‹7HƒþtSH<$H¥H¥H¥H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹T$0H‹L$8H‹D$@H‰T$pH‰L$xH‰„$€HƒÄHÉë© + 0runtime.morestack_noctxtŠnet.IP.Maskp "".~r2@type.net.IPnet.mask·3type.net.IPMask""..thistype.*"".IpOpt”ÀÀ „<Tgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·3280bececceccd33cb74587feedb1f9fþ "".(*IpOpt).To16 žeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹|$8H‹7Hƒþt2H<$H¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÊ + 0runtime.morestack_noctxtÈnet.IP.To16@` "".~r1type.net.IP""..thistype.*"".IpOpt`p_` +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".(*IpOpt).To4 žeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹|$8H‹7Hƒþt2H<$H¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÊ + 0runtime.morestack_noctxtÈnet.IP.To4@` "".~r1type.net.IP""..thistype.*"".IpOpt`p_` +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ2"".(*IpOpt).UnmarshalText`HHÇD$(HÇD$0H‹\$H‹+H‰l$é@.net.(*IP).UnmarshalText` "".~r2@type.errornet.text·3type.[]uint8""..thistype.*"".IpOpt00 0Tgclocals·9f0d5ba6770c4a1ed4fa771547e96df1Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".IpOpt.DefaultMask žeH‹ 
%H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹t$8Hƒþt5H,$H‰ïH¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÇ + 0runtime.morestack_noctxtÈ$net.IP.DefaultMask@` "".~r1type.net.IPMask""..thistype."".IpOpt`p_`" +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".IpOpt.Equal€úeH‹ %H;awèëêHƒì8H‹Y H…Ût H|$@H9;uH‰#H‹t$@Hƒþt>H,$H‰ïH¥H¥H¥H‹\$HH‰\$H‹\$PH‰\$ H‹\$XH‰\$(è¶\$0ˆ\$`HƒÄ8Éë¾ + 0runtime.morestack_noctxtÎnet.IP.EqualPp "".~r2@type.boolnet.x·3type.net.IP""..thistype."".IpOptp^op€$€ +fTgclocals·14c45952157723c8762210d9c661bf29Tgclocals·3280bececceccd33cb74587feedb1f9fþ0"".IpOpt.IsGlobalUnicastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’,net.IP.IsGlobalUnicast @ "".~r1type.bool""..thistype."".IpOpt@@?@`&` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþD"".IpOpt.IsInterfaceLocalMulticastààeH‹ %H;awèëêHƒì(H‹Y H…Ût H|$0H9;uH‰#HH‰$èH‹D$H‰D$ H‰$H‹t$0Hƒþ„ŒHl$H‰ïH¥H¥H¥èH‹T$ H‹jHƒýu_H‹ +H‹BL‹BHƒøvR¶€ûÿuFHÇÀ<t0H‹H‹BL‹BHƒøv#HÿöHƒã€ûuHÇÀˆD$8HƒÄ(Ã1Àëóè 1Àë½è ‰émÿÿÿ + 0runtime.morestack_noctxtftype.net.IPx"runtime.newobjectØ2runtime.writebarrierslice°$runtime.panicindexÆ$runtime.panicindex P"".&net.ip·2type.*net.IP "".~r1type.bool""..thistype."".IpOptP¸OPð(ð;0lTgclocals·2148c3737b2bb476685a1100a2e8343eTgclocals·e1ae6533a9e39048ba0735a2264ce16aþ:"".IpOpt.IsLinkLocalMulticastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’6net.IP.IsLinkLocalMulticast @ "".~r1type.bool""..thistype."".IpOpt@@?@`*` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ6"".IpOpt.IsLinkLocalUnicastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’2net.IP.IsLinkLocalUnicast @ "".~r1type.bool""..thistype."".IpOpt@@?@`,` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ&"".IpOpt.IsLoopbackÀ¾eH‹ 
%H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’"net.IP.IsLoopback @ "".~r1type.bool""..thistype."".IpOpt@@?@`.` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".IpOpt.IsMulticastÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’$net.IP.IsMulticast @ "".~r1type.bool""..thistype."".IpOpt@@?@`0` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ,"".IpOpt.IsUnspecifiedÀ¾eH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹t$(Hƒþt H,$H‰ïH¥H¥H¥è¶\$ˆ\$0HƒÄ ÉëÜ + 0runtime.morestack_noctxt’(net.IP.IsUnspecified @ "".~r1type.bool""..thistype."".IpOpt@@?@`2` +HTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fþ("".IpOpt.MarshalText€êeH‹ %H;awèëêHƒì@H‹Y H…Ût H|$HH9;uH‰#HÇD$PHÇD$XHÇD$`HÇD$hHÇD$pH‹t$HHƒþtIH,$H‰ïH¥H¥H¥èH‹t$H‹l$ H‹T$(H‹L$0H‹D$8H‰t$PH‰l$XH‰T$`H‰L$hH‰D$pHƒÄ@Éë³ + 0runtime.morestack_noctxtì$net.IP.MarshalText`€ "".~r2@type.error "".~r1type.[]uint8""..thistype."".IpOpt€–€À4À +uKTgclocals·13c015770347481bee7a16dde25a3e2fTgclocals·3280bececceccd33cb74587feedb1f9fþ"".IpOpt.Mask€æeH‹ %H;awèëêHƒìHH‹Y H…Ût H|$PH9;uH‰#HÇD$pHÇD$xHÇ„$€H‹t$PHƒþtVH,$H‰ïH¥H¥H¥H‹\$XH‰\$H‹\$`H‰\$ H‹\$hH‰\$(èH‹T$0H‹L$8H‹D$@H‰T$pH‰L$xH‰„$€HƒÄHÉë¦ + 0runtime.morestack_noctxtŠnet.IP.Maskp "".~r2@type.net.IPnet.mask·3type.net.IPMask""..thistype."".IpOpt”À6À „<Tgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".IpOpt.To16 žeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹t$8Hƒþt5H,$H‰ïH¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÇ + 0runtime.morestack_noctxtÈnet.IP.To16@` "".~r1type.net.IP""..thistype."".IpOpt`p_`8 +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ"".IpOpt.To4 žeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$@HÇD$HHÇD$PH‹t$8Hƒþt5H,$H‰ïH¥H¥H¥èH‹T$H‹L$ H‹D$(H‰T$@H‰L$HH‰D$PHƒÄ0ÉëÇ + 0runtime.morestack_noctxtÈnet.IP.To4@` 
"".~r1type.net.IP""..thistype."".IpOpt`p_`: +c-Tgclocals·13d3af77a5bf02af6db4588efb2ea811Tgclocals·3280bececceccd33cb74587feedb1f9fþ,"".IpOpt.UnmarshalText ŽeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#HÇD$XHÇD$`H‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$èH‹L$ H‹D$(H‰L$XH‰D$`HƒÄ0à + 0runtime.morestack_noctxtÔ.net.(*IP).UnmarshalText`` "".~r2@type.errornet.text·3type.[]uint8""..thistype."".IpOpt`l_ +< +i'Tgclocals·9f0d5ba6770c4a1ed4fa771547e96df1Tgclocals·3280bececceccd33cb74587feedb1f9fþ(type..hash.[8]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_0511type.int"".autotmp_0510type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[8]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ$type..eq.[8]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_0515?type.string"".autotmp_0514type.string"".autotmp_0513_type.int"".autotmp_0512Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[8]string"".ptype.*[8]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ(type..hash.[3]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_0518type.int"".autotmp_0517type.int "".~r30type.uintptr"".h 
type.uintptr"".stype.uintptr"".ptype.*[3]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ$type..eq.[3]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_0522?type.string"".autotmp_0521type.string"".autotmp_0520_type.int"".autotmp_0519Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[3]string"".ptype.*[3]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ4type..hash.[3]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0525type.int"".autotmp_0524type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[3]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ0type..eq.[3]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0529?"type.interface {}"".autotmp_0528"type.interface {}"".autotmp_0527_type.int"".autotmp_0526Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[3]interface 
{}"".p*type.*[3]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¼/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.goþ,4go.itab.*os.File.io.Readerþ,Dgo.itab."".ErrBadEnvVariable.errorþgo.string."#"0$# go.string."#"þgo.string."="0$= go.string."="þzgo.string."variable '%s' is not a valid environment variable"„1variable '%s' is not a valid environment variable zgo.string."variable '%s' is not a valid environment variable"þ"go.string."%s=%s"0,%s=%s "go.string."%s=%s"þTgclocals·ad819538c58aa8f64fd2a144ce4dfed6ððvÀ ,,€€È€È€È€È€È‚À?À?€À?À?À? À?€À?À?À?À?þTgclocals·231e82aa2fc136f1b81a915e25ec3cfaˆˆ",","",þXgo.string."poorly formatted environment: %s"pb poorly formatted environment: %s Xgo.string."poorly formatted environment: %s"þTgclocals·403a8d79fd24b295e8557f6970497aa3((ððþTgclocals·363b18caf0020ca418fd378dbb75c855((þTgclocals·f6dcde45bff02c6c4b088b594fd52a4c((þTgclocals·b29a376724b9675f7c9e576a6dabc1e0(( + + +þFgo.string."%s is not an ip address"PP%s is not an ip address Fgo.string."%s is not an ip address"þTgclocals·e11ef9888c395c84aa28a6aa44bae264((  þTgclocals·149f5bf45741ad4d84849674a456615e(( + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·9307bf1379da22b408b9b243276c0115((ª¨þTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þTgclocals·e1ae6533a9e39048ba0735a2264ce16a þTgclocals·bd51743682bd6c0f7b9f2e8e6dffed99  + +þgo.string."%v"0&%v go.string."%v"þTgclocals·7876b70d8da64fa07ca2fd3ecc71f905((ð‚ð€þTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þTgclocals·1509598f597bd125bdfd9d44972821c7 þTgclocals·61fa3b017c2e156e481b3d912c20f49b  + + +þTgclocals·3548d93f69958cd35a7e42f3f32eff97PP"‚‚þTgclocals·2018557e3ee0abccf2865b16663e690b00 + + + +þTgclocals·895437610367c247af3cb64952bed446  + þTgclocals·2148c3737b2bb476685a1100a2e8343e 
þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13d3af77a5bf02af6db4588efb2ea811þTgclocals·4398bb51467914f29637b614067b995f þTgclocals·9ff42bf311af152488d11f0f78c8d5ce  + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·729deb178891d0cb0d4c5b2058f91105 þTgclocals·61fa3b017c2e156e481b3d912c20f49b  + + +þTgclocals·403a8d79fd24b295e8557f6970497aa3((ððþTgclocals·6d340c3bdac448a6ef1256f331f68dd3((þTgclocals·e1ae6533a9e39048ba0735a2264ce16a þTgclocals·bd51743682bd6c0f7b9f2e8e6dffed99  + +þ"go.string."stdin"0,stdin "go.string."stdin"þ$go.string."stdout"0.stdout $go.string."stdout"þ$go.string."stderr"0.stderr $go.string."stderr"þlgo.string."valid streams are STDIN, STDOUT and STDERR"€v*valid streams are STDIN, STDOUT and STDERR lgo.string."valid streams are STDIN, STDOUT and STDERR"þTgclocals·5de38da5eeb0729bf417a80c29b78c42(( ‚ " "þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13c015770347481bee7a16dde25a3e2f þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13c015770347481bee7a16dde25a3e2f þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13c015770347481bee7a16dde25a3e2f þgo.string.":"0$: go.string.":"þLgo.string."bad format for volumes: %s"`Vbad format for volumes: %s Lgo.string."bad format for volumes: %s"þ"go.string."%s:%s"0,%s:%s "go.string."%s:%s"þRgo.string."bad mount mode specified : %s"`\bad mount mode specified : %s Rgo.string."bad mount mode specified : %s"þ(go.string."%s:%s:%s"@2%s:%s:%s (go.string."%s:%s:%s"þLgo.string."%s is not an absolute path"`V%s is not an absolute path Lgo.string."%s is not an absolute path"þTgclocals·468082ab545c1bae5a803029688d85e2ÀÀPò€ò  €ððð€ðð" ðÿ€ðÿðÿþTgclocals·17574d085f5f5b0763ac1aaf01ce4b67  þTgclocals·e77b4954b1067dbe825673e24b2082a5€€,òòò ð ðþTgclocals·b343c92068d41d468064df311efb05d1HH "þTgclocals·0d8996d96a62b65d7a617f192875fe09(( € þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( 
þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·13c015770347481bee7a16dde25a3e2f þgo.string." "0$  go.string." "þgo.string."."0$. go.string."."þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13c015770347481bee7a16dde25a3e2f þHgo.string."%s is not a valid domain"`R%s is not a valid domain Hgo.string."%s is not a valid domain"þTgclocals·e11ef9888c395c84aa28a6aa44bae264((  þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( þNgo.string."bad format for add-host: %q"`Xbad format for add-host: %q Ngo.string."bad format for add-host: %q"þ\go.string."invalid IP address in add-host: %q"pf"invalid IP address in add-host: %q \go.string."invalid IP address in add-host: %q"þTgclocals·0a70b462877543c2d66d492afda34c99@@€ððòòþTgclocals·4a0bb136639836c86d1f426111a5a477@@ þHgo.string."bad attribute format: %s"`Rbad attribute format: %s Hgo.string."bad attribute format: %s"þTgclocals·1eb9d8ec9969f1d922533aa863dff6f6(( / þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·13c015770347481bee7a16dde25a3e2f þTgclocals·e699116c0f498e16ce59c4e4fa0a75a3 þTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8f þTgclocals·f6dcde45bff02c6c4b088b594fd52a4c((þTgclocals·31b90725c9a885e731df361f51db8f0d((þTgclocals·61e2515c69061b8fed0e66ece719f936 þTgclocals·61fa3b017c2e156e481b3d912c20f49b  + + +þTgclocals·e10b17f36388f52b2772c8b1b26a55b3pp8  ªU ªU ªUð€ð€þTgclocals·893bc98fd3630511d02cf4cf8c0f1f93@@þTgclocals·26d269e519e13fce3b5a9726f3ff5d6dPP"€ªV€ªV‚ªVþTgclocals·afd56e89fe406cd8321967b6f2c293ef00þgo.string." \t"0&  go.string." 
\t"þ@go.string."/var/run/docker.sock"PJ/var/run/docker.sock @go.string."/var/run/docker.sock"þ*go.string."127.0.0.1"@4 127.0.0.1 *go.string."127.0.0.1"þ`go.string."^[[:alpha:]_][[:alpha:][:digit:]_]*$"pj$^[[:alpha:]_][[:alpha:][:digit:]_]*$ `go.string."^[[:alpha:]_][[:alpha:][:digit:]_]*$"þ*go.string."unix://%s"@4 unix://%s *go.string."unix://%s"þ(go.string."[a-zA-Z]"@2[a-zA-Z] (go.string."[a-zA-Z]"þ""..gostring.1°¨ƒ^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$ ""..gostring.1þTgclocals·da64d0820e77fbc27f563f985efdc21f þTgclocals·0115f8d53b75c1696444f08ad03251d9þ*8"".EnvironmentVariableRegexp&type.*regexp.Regexpþ*"".whiteSpaces type.string  go.string." \t"þ*"".DefaultHost type.stringþ*"".alphaRegexp&type.*regexp.Regexpþ*"".domainRegexp&type.*regexp.Regexpþ*$"".DefaultHTTPHost type.string  *go.string."127.0.0.1"þ,$"".DefaultHTTPPorttype.intG þ*("".DefaultUnixSocket type.string  @go.string."/var/run/docker.sock"þ""".statictmp_0194`type.[3]string` "go.string."stdin"  $go.string."stdout"@ $go.string."stderr"þ,"".initdone·type.uint8þ$"".ParseEnvFile·f"".ParseEnvFileþos.Open·fos.Openþ(runtime.newobject·f"runtime.newobjectþ,runtime.deferreturn·f&runtime.deferreturnþ&os.(*File).Close·f os.(*File).Closeþ(runtime.deferproc·f"runtime.deferprocþ&runtime.typ2Itab·f runtime.typ2Itabþ(runtime.makeslice·f"runtime.makesliceþ8runtime.writebarrieriface·f2runtime.writebarrierifaceþ$bufio.ScanLines·fbufio.ScanLinesþ4runtime.writebarrierptr·f.runtime.writebarrierptrþ8runtime.writebarrierslice·f2runtime.writebarriersliceþ0bufio.(*Scanner).Scan·f*bufio.(*Scanner).Scanþ8runtime.slicebytetostring·f2runtime.slicebytetostringþ*runtime.panicslice·f$runtime.panicsliceþ&runtime.eqstring·f runtime.eqstringþ"strings.SplitN·fstrings.SplitNþ*runtime.panicindex·f$runtime.panicindexþ&strings.TrimLeft·f 
strings.TrimLeftþ>regexp.(*Regexp).MatchString·f8regexp.(*Regexp).MatchStringþ$runtime.convT2E·fruntime.convT2Eþfmt.Sprintf·ffmt.Sprintfþ$runtime.convT2I·fruntime.convT2Iþ(runtime.growslice·f"runtime.growsliceþ:runtime.writebarrierstring·f4runtime.writebarrierstringþ(strings.TrimSpace·f"strings.TrimSpaceþos.Getenv·fos.Getenvþ$runtime.ifaceeq·fruntime.ifaceeqþ,runtime.throwreturn·f&runtime.throwreturnþ:"".ErrBadEnvVariable.Error·f4"".ErrBadEnvVariable.Errorþ"".NewIpOpt·f"".NewIpOptþ$"".(*IpOpt).Set·f"".(*IpOpt).Setþnet.ParseIP·fnet.ParseIPþfmt.Errorf·ffmt.Errorfþ*"".(*IpOpt).String·f$"".(*IpOpt).Stringþ net.IP.String·fnet.IP.Stringþ""".NewListOpts·f"".NewListOptsþ("".NewListOptsRef·f""".NewListOptsRefþ0"".(*ListOpts).String·f*"".(*ListOpts).Stringþ*"".(*ListOpts).Set·f$"".(*ListOpts).Setþ0"".(*ListOpts).Delete·f*"".(*ListOpts).Deleteþ$runtime.memmove·fruntime.memmoveþ0"".(*ListOpts).GetMap·f*"".(*ListOpts).GetMapþ$runtime.makemap·fruntime.makemapþ*runtime.mapassign1·f$runtime.mapassign1þ0"".(*ListOpts).GetAll·f*"".(*ListOpts).GetAllþ*"".(*ListOpts).Get·f$"".(*ListOpts).Getþ*"".(*ListOpts).Len·f$"".(*ListOpts).Lenþ("".(*MapOpts).Set·f""".(*MapOpts).Setþ."".(*MapOpts).String·f("".(*MapOpts).Stringþ "".NewMapOpts·f"".NewMapOptsþ("".ValidateAttach·f""".ValidateAttachþ$strings.ToLower·fstrings.ToLowerþ$"".ValidateLink·f"".ValidateLinkþ¸github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.ParseLink·f²github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers.ParseLinkþ("".ValidateDevice·f""".ValidateDeviceþ$"".validatePath·f"".validatePathþ$"".ValidatePath·f"".ValidatePathþ strings.Count·fstrings.Countþpath.Clean·fpath.Cleanþ:runtime.mapaccess1_faststr·f4runtime.mapaccess1_faststrþ""".ValidateEnv·f"".ValidateEnvþ 
strings.Split·fstrings.Splitþ$"".doesEnvExist·f"".doesEnvExistþ."".ValidateIPAddress·f("".ValidateIPAddressþ0"".ValidateMACAddress·f*"".ValidateMACAddressþnet.ParseMAC·fnet.ParseMACþ."".ValidateDNSSearch·f("".ValidateDNSSearchþstrings.Trim·fstrings.Trimþ("".validateDomain·f""".validateDomainþgo.typelink.[0]string/[0]stringtype.[0]stringþ&go.string."[]uint8"00[]uint8 &go.string."[]uint8"þtype.[]uint8  ß~.8   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P&go.string."[]uint8"p*go.weak.type.*[]uint8€"runtime.zerovaluetype.uint8þ6go.typelink.[]uint8/[]uint8type.[]uint8þbruntime.gcbits.0x88000000000000000000000000000000 ˆþFgo.string."*opts.ErrBadEnvVariable"PP*opts.ErrBadEnvVariable Fgo.string."*opts.ErrBadEnvVariable"þ go.string."opts"0*opts go.string."opts"þ:go.string."ErrBadEnvVariable"PDErrBadEnvVariable :go.string."ErrBadEnvVariable"þ"go.string."Error"0,Error "go.string."Error"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þ`go.string."func(*opts.ErrBadEnvVariable) string"pj$func(*opts.ErrBadEnvVariable) string `go.string."func(*opts.ErrBadEnvVariable) string"þNtype.func(*"".ErrBadEnvVariable) string  ‚®3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*opts.ErrBadEnvVariable) string"p`go.weak.type.*func(*"".ErrBadEnvVariable) string€"runtime.zerovalue €Ntype.func(*"".ErrBadEnvVariable) stringÐNtype.func(*"".ErrBadEnvVariable) string€4type.*"".ErrBadEnvVariabletype.stringþ2go.string."func() string"@< func() string 2go.string."func() string"þ$type.func() string¢mË3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."func() string"p6go.weak.type.*func() string€"runtime.zerovalue €$type.func() stringЀ$type.func() string€type.stringþ4type.*"".ErrBadEnvVariableÐÐ¥kvû6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*opts.ErrBadEnvVariable"pFgo.weak.type.**"".ErrBadEnvVariable€"runtime.zerovalue2type."".ErrBadEnvVariable` 4type.*"".ErrBadEnvVariableÀð4type.*"".ErrBadEnvVariableð"go.string."Error"$type.func() string Ntype.func(*"".ErrBadEnvVariable) string°:"".(*ErrBadEnvVariable).ErrorÀ:"".(*ErrBadEnvVariable).Errorþbruntime.gcbits.0x48000000000000000000000000000000 HþDgo.string."opts.ErrBadEnvVariable"PNopts.ErrBadEnvVariable Dgo.string."opts.ErrBadEnvVariable"þgo.string."msg"0(msg go.string."msg"þ^go.string."func(opts.ErrBadEnvVariable) string"ph#func(opts.ErrBadEnvVariable) string ^go.string."func(opts.ErrBadEnvVariable) string"þLtype.func("".ErrBadEnvVariable) string  ã¾€„3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(opts.ErrBadEnvVariable) string"p^go.weak.type.*func("".ErrBadEnvVariable) string€"runtime.zerovalue €Ltype.func("".ErrBadEnvVariable) stringÐLtype.func("".ErrBadEnvVariable) string€2type."".ErrBadEnvVariabletype.stringþ2type."".ErrBadEnvVariableÀÀT æU$ À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000PDgo.string."opts.ErrBadEnvVariable"p4type.*"".ErrBadEnvVariable€"runtime.zerovalueÀ2type."".ErrBadEnvVariableÀgo.string."msg"Ð"go.importpath."".àtype.string`2type."".ErrBadEnvVariable:go.string."ErrBadEnvVariable" "go.importpath."".°à2type."".ErrBadEnvVariableà"go.string."Error"€$type.func() stringLtype.func("".ErrBadEnvVariable) string :"".(*ErrBadEnvVariable).Error°4"".ErrBadEnvVariable.Errorþbruntime.gcbits.0xcc000000000000000000000000000000 Ìþ0go.string."interface {}"@: interface {} 0go.string."interface {}"þ"type.interface {}ÀÀçW  € runtime.algarray0bruntime.gcbits.0xcc000000000000000000000000000000P0go.string."interface {}"p4go.weak.type.*interface {}€"runtime.zerovalueÀ"type.interface {}þ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þtype.func(*"".IpOpt) net.IPMask  xH«²3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(*opts.IpOpt) net.IPMask"pPgo.weak.type.*func(*"".IpOpt) net.IPMask€"runtime.zerovalue €>type.func(*"".IpOpt) net.IPMaskÐ>type.func(*"".IpOpt) net.IPMask€type.*"".IpOpttype.net.IPMaskþTgo.string."func(*opts.IpOpt, net.IP) bool"`^func(*opts.IpOpt, net.IP) bool Tgo.string."func(*opts.IpOpt, net.IP) bool"þBtype.func(*"".IpOpt, net.IP) bool°°&Ð*3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."func(*opts.IpOpt, net.IP) bool"pTgo.weak.type.*func(*"".IpOpt, net.IP) bool€"runtime.zerovalue €Btype.func(*"".IpOpt, net.IP) boolРBtype.func(*"".IpOpt, net.IP) bool€type.*"".IpOpttype.net.IP type.boolþDgo.string."func(*opts.IpOpt) bool"PNfunc(*opts.IpOpt) bool Dgo.string."func(*opts.IpOpt) bool"þ2type.func(*"".IpOpt) bool  £äÃ^3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."func(*opts.IpOpt) bool"pDgo.weak.type.*func(*"".IpOpt) bool€"runtime.zerovalue €2type.func(*"".IpOpt) boolÐ2type.func(*"".IpOpt) bool€type.*"".IpOpttype.boolþ\go.string."func(*opts.IpOpt) ([]uint8, error)"pf"func(*opts.IpOpt) ([]uint8, error) \go.string."func(*opts.IpOpt) ([]uint8, error)"þJtype.func(*"".IpOpt) ([]uint8, error)°°Bu3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*opts.IpOpt) ([]uint8, error)"p\go.weak.type.*func(*"".IpOpt) ([]uint8, error)€"runtime.zerovalue €Jtype.func(*"".IpOpt) ([]uint8, error)ÐJtype.func(*"".IpOpt) ([]uint8, error)€type.*"".IpOpttype.[]uint8 
type.errorþ`go.string."func(*opts.IpOpt, net.IPMask) net.IP"pj$func(*opts.IpOpt, net.IPMask) net.IP `go.string."func(*opts.IpOpt, net.IPMask) net.IP"þNtype.func(*"".IpOpt, net.IPMask) net.IP°°Þ iƒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(*opts.IpOpt, net.IPMask) net.IP"p`go.weak.type.*func(*"".IpOpt, net.IPMask) net.IP€"runtime.zerovalue €Ntype.func(*"".IpOpt, net.IPMask) net.IPРNtype.func(*"".IpOpt, net.IPMask) net.IP€type.*"".IpOpttype.net.IPMask type.net.IPþVgo.string."func(*opts.IpOpt, string) error"``func(*opts.IpOpt, string) error Vgo.string."func(*opts.IpOpt, string) error"þDtype.func(*"".IpOpt, string) error°° cž 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PVgo.string."func(*opts.IpOpt, string) error"pVgo.weak.type.*func(*"".IpOpt, string) error€"runtime.zerovalue €Dtype.func(*"".IpOpt, string) errorРDtype.func(*"".IpOpt, string) error€type.*"".IpOpttype.string type.errorþHgo.string."func(*opts.IpOpt) string"`Rfunc(*opts.IpOpt) string Hgo.string."func(*opts.IpOpt) string"þ6type.func(*"".IpOpt) string  uX«!3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func(*opts.IpOpt) string"pHgo.weak.type.*func(*"".IpOpt) string€"runtime.zerovalue €6type.func(*"".IpOpt) stringÐ6type.func(*"".IpOpt) string€type.*"".IpOpttype.stringþHgo.string."func(*opts.IpOpt) net.IP"`Rfunc(*opts.IpOpt) net.IP Hgo.string."func(*opts.IpOpt) net.IP"þ6type.func(*"".IpOpt) net.IP  ´õŒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func(*opts.IpOpt) net.IP"pHgo.weak.type.*func(*"".IpOpt) net.IP€"runtime.zerovalue €6type.func(*"".IpOpt) net.IPÐ6type.func(*"".IpOpt) net.IP€type.*"".IpOpttype.net.IPþXgo.string."func(*opts.IpOpt, []uint8) error"pb func(*opts.IpOpt, []uint8) error Xgo.string."func(*opts.IpOpt, []uint8) error"þFtype.func(*"".IpOpt, []uint8) error°°Oä3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func(*opts.IpOpt, []uint8) error"pXgo.weak.type.*func(*"".IpOpt, []uint8) error€"runtime.zerovalue €Ftype.func(*"".IpOpt, []uint8) errorРFtype.func(*"".IpOpt, []uint8) error€type.*"".IpOpttype.[]uint8 type.errorþ.go.string."DefaultMask"@8 DefaultMask .go.string."DefaultMask"þ:go.string."func() net.IPMask"PDfunc() net.IPMask :go.string."func() net.IPMask"þ,type.func() net.IPMask'aØ'3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."func() net.IPMask"p>go.weak.type.*func() net.IPMask€"runtime.zerovalue €,type.func() net.IPMaskЀ,type.func() net.IPMask€type.net.IPMaskþ"go.string."Equal"0,Equal "go.string."Equal"þ:go.string."func(net.IP) bool"PDfunc(net.IP) bool :go.string."func(net.IP) bool"þ,type.func(net.IP) bool  ºÃ Ú3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."func(net.IP) bool"p>go.weak.type.*func(net.IP) bool€"runtime.zerovalue €,type.func(net.IP) boolÐ,type.func(net.IP) bool€type.net.IPtype.boolþ6go.string."IsGlobalUnicast"@@IsGlobalUnicast 6go.string."IsGlobalUnicast"þ.go.string."func() bool"@8 func() bool .go.string."func() bool"þ type.func() boolTËx3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."func() bool"p2go.weak.type.*func() bool€"runtime.zerovalue € type.func() boolЀ type.func() bool€type.boolþJgo.string."IsInterfaceLocalMulticast"`TIsInterfaceLocalMulticast Jgo.string."IsInterfaceLocalMulticast"þ@go.string."IsLinkLocalMulticast"PJIsLinkLocalMulticast @go.string."IsLinkLocalMulticast"þgo.string."func([]uint8) error"PHfunc([]uint8) error >go.string."func([]uint8) error"þ0type.func([]uint8) error  _÷[:3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func([]uint8) error"pBgo.weak.type.*func([]uint8) error€"runtime.zerovalue €0type.func([]uint8) errorÐ0type.func([]uint8) error€type.[]uint8type.errorþtype.*"".IpOptð ð ;‚g6°   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."*opts.IpOpt"p.go.weak.type.**"".IpOpt€"runtime.zerovaluetype."".IpOpt` type.*"".IpOptÀðtype.*"".IpOptð.go.string."DefaultMask",type.func() net.IPMask >type.func(*"".IpOpt) net.IPMask°."".(*IpOpt).DefaultMaskÀ."".(*IpOpt).DefaultMaskÐ"go.string."Equal"ð,type.func(net.IP) bool€Btype.func(*"".IpOpt, net.IP) bool""".(*IpOpt).Equal """.(*IpOpt).Equal°6go.string."IsGlobalUnicast"Ð type.func() boolà2type.func(*"".IpOpt) boolð6"".(*IpOpt).IsGlobalUnicast€6"".(*IpOpt).IsGlobalUnicastJgo.string."IsInterfaceLocalMulticast"° type.func() boolÀ2type.func(*"".IpOpt) boolÐJ"".(*IpOpt).IsInterfaceLocalMulticastàJ"".(*IpOpt).IsInterfaceLocalMulticastð@go.string."IsLinkLocalMulticast" type.func() bool 2type.func(*"".IpOpt) bool°@"".(*IpOpt).IsLinkLocalMulticastÀ@"".(*IpOpt).IsLinkLocalMulticastÐ*opts.ListOpts 4go.string."*opts.ListOpts"þPgo.string."func(*opts.ListOpts, string)"`Zfunc(*opts.ListOpts, string) Pgo.string."func(*opts.ListOpts, string)"þ>type.func(*"".ListOpts, string)  ö§Ó3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(*opts.ListOpts, string)"pPgo.weak.type.*func(*"".ListOpts, string)€"runtime.zerovalue €>type.func(*"".ListOpts, string)Р>type.func(*"".ListOpts, string)€"type.*"".ListOptstype.stringþZgo.string."func(*opts.ListOpts, string) bool"pd!func(*opts.ListOpts, string) bool Zgo.string."func(*opts.ListOpts, string) bool"þHtype.func(*"".ListOpts, string) bool°°~‰É˜3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."func(*opts.ListOpts, string) bool"pZgo.weak.type.*func(*"".ListOpts, string) bool€"runtime.zerovalue €Htype.func(*"".ListOpts, string) boolРHtype.func(*"".ListOpts, string) bool€"type.*"".ListOptstype.string type.boolþRgo.string."func(*opts.ListOpts) []string"`\func(*opts.ListOpts) []string Rgo.string."func(*opts.ListOpts) []string"þ@type.func(*"".ListOpts) []string  ˜>c¨3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."func(*opts.ListOpts) []string"pRgo.weak.type.*func(*"".ListOpts) []string€"runtime.zerovalue €@type.func(*"".ListOpts) []stringÐ@type.func(*"".ListOpts) []string€"type.*"".ListOptstype.[]stringþ*go.string."struct {}"@4 struct {} *go.string."struct {}"þtype.struct {}ÀÀ¬ö'™  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."struct {}"p.go.weak.type.*struct {}€"runtime.zerovalueÀtype.struct {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[8]string(type..hash.[8]stringþ,type..eqfunc.[8]string$type..eq.[8]stringþ&type..alg.[8]string 0type..hashfunc.[8]string,type..eqfunc.[8]stringþbruntime.gcbits.0x48484848484848480000000000000000 HHHHHHHHþ*go.string."[8]string"@4 [8]string *go.string."[8]string"þtype.[8]stringÀÀ€USŒ> &type..alg.[8]string0bruntime.gcbits.0x48484848484848480000000000000000P*go.string."[8]string"p.go.weak.type.*[8]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[8]string/[8]stringtype.[8]stringþ.go.string."[]struct {}"@8 []struct {} .go.string."[]struct {}"þ type.[]struct {}  ºÌ¥…   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P.go.string."[]struct {}"p2go.weak.type.*[]struct {}€"runtime.zerovaluetype.struct {}þFgo.typelink.[]struct {}/[]struct {} type.[]struct {}þ0go.string."[8]struct {}"@: [8]struct {} 0go.string."[8]struct {}"þ"type.[8]struct {}ÀÀ>ƒy ‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P0go.string."[8]struct {}"p4go.weak.type.*[8]struct {}€"runtime.zerovaluetype.struct {}  type.[]struct {}þJgo.typelink.[8]struct {}/[8]struct {}"type.[8]struct {}þPgo.string."*map.bucket[string]struct {}"`Z*map.bucket[string]struct {} Pgo.string."*map.bucket[string]struct {}"þBtype.*map.bucket[string]struct {}  –Ò6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."*map.bucket[string]struct {}"pTgo.weak.type.**map.bucket[string]struct {}€"runtime.zerovalue@type.map.bucket[string]struct {}þbruntime.gcbits.0x84848484848484848400000000000000 „„„„„„„„„þNgo.string."map.bucket[string]struct {}"`Xmap.bucket[string]struct {} Ngo.string."map.bucket[string]struct {}"þ go.string."keys"0*keys go.string."keys"þ$go.string."values"0.values $go.string."values"þ(go.string."overflow"@2overflow (go.string."overflow"þ@type.map.bucket[string]struct {}°°@»‡ˆˆ à runtime.algarray0bruntime.gcbits.0x84848484848484848400000000000000PNgo.string."map.bucket[string]struct {}"pRgo.weak.type.*map.bucket[string]struct {}€"runtime.zerovalueÀ@type.map.bucket[string]struct {}À go.string."keys"àtype.[8]string$go.string."values"°"type.[8]struct {}à(go.string."overflow"€Btype.*map.bucket[string]struct {}þbruntime.gcbits.0x44844800000000000000000000000000 D„HþHgo.string."map.hdr[string]struct {}"`Rmap.hdr[string]struct {} Hgo.string."map.hdr[string]struct {}"þ&go.string."buckets"00buckets &go.string."buckets"þ,go.string."oldbuckets"@6 +oldbuckets ,go.string."oldbuckets"þ:type.map.hdr[string]struct {}àà0v¤×  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PHgo.string."map.hdr[string]struct {}"pLgo.weak.type.*map.hdr[string]struct {}€"runtime.zerovalueÀ:type.map.hdr[string]struct {}À&go.string."buckets"àBtype.*map.bucket[string]struct {},go.string."oldbuckets"°Btype.*map.bucket[string]struct {}þ@go.string."map[string]struct {}"PJmap[string]struct {} @go.string."map[string]struct {}"þ2type.map[string]struct {}ÜܸQRá5 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."map[string]struct {}"pDgo.weak.type.*map[string]struct {}€"runtime.zerovaluetype.string type.struct {}°@type.map.bucket[string]struct {}À:type.map.hdr[string]struct {}þjgo.typelink.map[string]struct {}/map[string]struct {}2type.map[string]struct 
{}þjgo.string."func(*opts.ListOpts) map[string]struct {}"€t)func(*opts.ListOpts) map[string]struct {} jgo.string."func(*opts.ListOpts) map[string]struct {}"þXtype.func(*"".ListOpts) map[string]struct {}  ¢¹ˆ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*opts.ListOpts) map[string]struct {}"pjgo.weak.type.*func(*"".ListOpts) map[string]struct {}€"runtime.zerovalue €Xtype.func(*"".ListOpts) map[string]struct {}ÐXtype.func(*"".ListOpts) map[string]struct {}€"type.*"".ListOpts2type.map[string]struct {}þHgo.string."func(*opts.ListOpts) int"`Rfunc(*opts.ListOpts) int Hgo.string."func(*opts.ListOpts) int"þ6type.func(*"".ListOpts) int  ¿< ©3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func(*opts.ListOpts) int"pHgo.weak.type.*func(*"".ListOpts) int€"runtime.zerovalue €6type.func(*"".ListOpts) intÐ6type.func(*"".ListOpts) int€"type.*"".ListOptstype.intþ\go.string."func(*opts.ListOpts, string) error"pf"func(*opts.ListOpts, string) error \go.string."func(*opts.ListOpts, string) error"þJtype.func(*"".ListOpts, string) error°°ùE+£3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*opts.ListOpts, string) error"p\go.weak.type.*func(*"".ListOpts, string) error€"runtime.zerovalue €Jtype.func(*"".ListOpts, string) errorРJtype.func(*"".ListOpts, string) error€"type.*"".ListOptstype.string type.errorþNgo.string."func(*opts.ListOpts) string"`Xfunc(*opts.ListOpts) string Ngo.string."func(*opts.ListOpts) string"þgo.weak.type.*func(string) bool€"runtime.zerovalue €,type.func(string) boolÐ,type.func(string) bool€type.stringtype.boolþ$go.string."GetAll"0.GetAll $go.string."GetAll"þ6go.string."func() []string"@@func() []string 6go.string."func() []string"þ(type.func() []string‚ãûÔ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."func() []string"p:go.weak.type.*func() []string€"runtime.zerovalue €(type.func() []stringЀ(type.func() 
[]string€type.[]stringþ$go.string."GetMap"0.GetMap $go.string."GetMap"þNgo.string."func() map[string]struct {}"`Xfunc() map[string]struct {} Ngo.string."func() map[string]struct {}"þ@type.func() map[string]struct {}ª%¥3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."func() map[string]struct {}"pRgo.weak.type.*func() map[string]struct {}€"runtime.zerovalue €@type.func() map[string]struct {}Ѐ@type.func() map[string]struct {}€2type.map[string]struct {}þgo.string."Len"0(Len go.string."Len"þ,go.string."func() int"@6 +func() int ,go.string."func() int"þtype.func() intå†9à3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."func() int"p0go.weak.type.*func() int€"runtime.zerovalue €type.func() intЀtype.func() int€type.intþ"type.*"".ListOptsÃœ˜6V   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*opts.ListOpts"p4go.weak.type.**"".ListOpts€"runtime.zerovalue type."".ListOpts` "type.*"".ListOptsÀð"type.*"".ListOptsð$go.string."Delete""type.func(string) >type.func(*"".ListOpts, string)°*"".(*ListOpts).DeleteÀ*"".(*ListOpts).DeleteÐgo.string."Get"ð,type.func(string) bool€Htype.func(*"".ListOpts, string) bool$"".(*ListOpts).Get $"".(*ListOpts).Get°$go.string."GetAll"Ð(type.func() []stringà@type.func(*"".ListOpts) []stringð*"".(*ListOpts).GetAll€*"".(*ListOpts).GetAll$go.string."GetMap"°@type.func() map[string]struct {}ÀXtype.func(*"".ListOpts) map[string]struct {}Ð*"".(*ListOpts).GetMapà*"".(*ListOpts).GetMapðgo.string."Len"type.func() int 6type.func(*"".ListOpts) int°$"".(*ListOpts).LenÀ$"".(*ListOpts).LenÐgo.string."Set"ð.type.func(string) error€Jtype.func(*"".ListOpts, string) error$"".(*ListOpts).Set $"".(*ListOpts).Set°$go.string."String"Ð$type.func() stringàYˆ à 
runtime.algarray0Btype..gc.map.bucket[string]string@Jtype..gcprog.map.bucket[string]stringPHgo.string."map.bucket[string]string"pLgo.weak.type.*map.bucket[string]string€"runtime.zerovalueÀ:type.map.bucket[string]stringÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]stringà(go.string."overflow"€go.weak.type.*map[string]string€"runtime.zerovaluetype.string type.string°:type.map.bucket[string]stringÀ4type.map.hdr[string]stringþ^go.typelink.map[string]string/map[string]string,type.map[string]stringþ0go.string."opts.MapOpts"@: opts.MapOpts 0go.string."opts.MapOpts"þ&go.string."MapOpts"00MapOpts &go.string."MapOpts"þtype."".MapOpts°°#¤B­   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."opts.MapOpts"p type.*"".MapOpts€"runtime.zerovalueÀtype."".MapOptsÀ$go.string."values"Ð"go.importpath."".à,type.map[string]string*go.string."validator" "go.importpath."".°0type."".ValidatorFctType`àtype."".MapOptsà&go.string."MapOpts"ð"go.importpath."".€°type."".MapOptsþ2go.string."*opts.MapOpts"@< *opts.MapOpts 2go.string."*opts.MapOpts"þZgo.string."func(*opts.MapOpts, string) error"pd!func(*opts.MapOpts, string) error Zgo.string."func(*opts.MapOpts, string) error"þHtype.func(*"".MapOpts, string) error°°go 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."func(*opts.MapOpts, string) error"pZgo.weak.type.*func(*"".MapOpts, string) error€"runtime.zerovalue €Htype.func(*"".MapOpts, string) errorРHtype.func(*"".MapOpts, string) error€ type.*"".MapOptstype.string type.errorþLgo.string."func(*opts.MapOpts) string"`Vfunc(*opts.MapOpts) string Lgo.string."func(*opts.MapOpts) string"þ:type.func(*"".MapOpts) string  *êƒx3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(*opts.MapOpts) string"pLgo.weak.type.*func(*"".MapOpts) string€"runtime.zerovalue €:type.func(*"".MapOpts) stringÐ:type.func(*"".MapOpts) string€ type.*"".MapOptstype.stringþ type.*"".MapOpts°°7¨å’6$   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."*opts.MapOpts"p2go.weak.type.**"".MapOpts€"runtime.zerovaluetype."".MapOpts`  type.*"".MapOptsÀð type.*"".MapOptsðgo.string."Set".type.func(string) error Htype.func(*"".MapOpts, string) error°""".(*MapOpts).SetÀ""".(*MapOpts).SetÐ$go.string."String"ð$type.func() string€:type.func(*"".MapOpts) string("".(*MapOpts).String ("".(*MapOpts).StringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[3]string(type..hash.[3]stringþ,type..eqfunc.[3]string$type..eq.[3]stringþ&type..alg.[3]string 0type..hashfunc.[3]string,type..eqfunc.[3]stringþbruntime.gcbits.0x48484800000000000000000000000000 HHHþ*go.string."[3]string"@4 [3]string *go.string."[3]string"þtype.[3]stringÀÀ0CÙiB &type..alg.[3]string0bruntime.gcbits.0x48484800000000000000000000000000P*go.string."[3]string"p.go.weak.type.*[3]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[3]string/[3]stringtype.[3]stringþ,go.string."*[3]string"@6 +*[3]string ,go.string."*[3]string"þtype.*[3]string   ++é 6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[3]string"p0go.weak.type.**[3]string€"runtime.zerovaluetype.[3]stringþ$go.string."[]bool"0.[]bool $go.string."[]bool"þtype.[]bool  ±åç   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P$go.string."[]bool"p(go.weak.type.*[]bool€"runtime.zerovaluetype.boolþ2go.typelink.[]bool/[]booltype.[]boolþ&go.string."[8]bool"00[8]bool &go.string."[8]bool"þtype.[8]boolÀÀs£5‘   runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P&go.string."[8]bool"p*go.weak.type.*[8]bool€"runtime.zerovaluetype.bool type.[]boolþ6go.typelink.[8]bool/[8]booltype.[8]boolþFgo.string."*map.bucket[string]bool"PP*map.bucket[string]bool Fgo.string."*map.bucket[string]bool"þ8type.*map.bucket[string]bool  ë[ÔE6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."*map.bucket[string]bool"pJgo.weak.type.**map.bucket[string]bool€"runtime.zerovalue6type.map.bucket[string]boolþ,>type..gc.map.bucket[string]bool(þFtype..gcprog.map.bucket[string]bool™™™™%þDgo.string."map.bucket[string]bool"PNmap.bucket[string]bool Dgo.string."map.bucket[string]bool"þ6type.map.bucket[string]bool°°˜2aBÝYˆ à runtime.algarray0>type..gc.map.bucket[string]bool@Ftype..gcprog.map.bucket[string]boolPDgo.string."map.bucket[string]bool"pHgo.weak.type.*map.bucket[string]bool€"runtime.zerovalueÀ6type.map.bucket[string]boolÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]boolà(go.string."overflow"€8type.*map.bucket[string]boolþ>go.string."map.hdr[string]bool"PHmap.hdr[string]bool >go.string."map.hdr[string]bool"þ0type.map.hdr[string]boolàà03‡(  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000P>go.string."map.hdr[string]bool"pBgo.weak.type.*map.hdr[string]bool€"runtime.zerovalueÀ0type.map.hdr[string]boolÀ&go.string."buckets"à8type.*map.bucket[string]bool,go.string."oldbuckets"°8type.*map.bucket[string]boolþ6go.string."map[string]bool"@@map[string]bool 6go.string."map[string]bool"þ(type.map[string]boolÜÜñÓ5˜ € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."map[string]bool"p:go.weak.type.*map[string]bool€"runtime.zerovaluetype.string type.bool°6type.map.bucket[string]boolÀ0type.map.hdr[string]boolþVgo.typelink.map[string]bool/map[string]bool(type.map[string]boolþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.typelink.[][]uint8/[][]uint8type.[][]uint8þ8go.string."[]*ulimit.Ulimit"PB[]*ulimit.Ulimit 8go.string."[]*ulimit.Ulimit"þºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  ȯ   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P8go.string."[]*ulimit.Ulimit"pÌgo.weak.type.*[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalue¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþêgo.typelink.[]*ulimit.Ulimit/[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþbruntime.gcbits.0x88888888000000000000000000000000 ˆˆˆˆþ:go.string."[8]*ulimit.Ulimit"PD[8]*ulimit.Ulimit :go.string."[8]*ulimit.Ulimit"þ¼type.[8]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÀÀ@§…@  runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000P:go.string."[8]*ulimit.Ulimit"pÎgo.weak.type.*[8]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalue¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit ºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþîgo.typelink.[8]*ulimit.Ulimit/[8]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit¼type.[8]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþZgo.string."*map.bucket[string]*ulimit.Ulimit"pd!*map.bucket[string]*ulimit.Ulimit Zgo.string."*map.bucket[string]*ulimit.Ulimit"þÜtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  Ì‚ž6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."*map.bucket[string]*ulimit.Ulimit"pîgo.weak.type.**map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÚtype.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþbruntime.gcbits.0x84848484848484848488888888000000 „„„„„„„„„ˆˆˆˆþXgo.string."map.bucket[string]*ulimit.Ulimit"pb map.bucket[string]*ulimit.Ulimit Xgo.string."map.bucket[string]*ulimit.Ulimit"þÚtype.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit°°Ð*ŽèˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000PXgo.string."map.bucket[string]*ulimit.Ulimit"pìgo.weak.type.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÀÚtype.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÀ go.string."keys"àtype.[8]string$go.string."values"°¼type.[8]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimità(go.string."overflow"€Ütype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþRgo.string."map.hdr[string]*ulimit.Ulimit"`\map.hdr[string]*ulimit.Ulimit Rgo.string."map.hdr[string]*ulimit.Ulimit"þÔtype.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitàà0‡I:"  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PRgo.string."map.hdr[string]*ulimit.Ulimit"pægo.weak.type.*map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÀÔtype.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÀ&go.string."buckets"àÜtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit,go.string."oldbuckets"°Ütype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþJgo.string."map[string]*ulimit.Ulimit"`Tmap[string]*ulimit.Ulimit Jgo.string."map[string]*ulimit.Ulimit"þÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÜÜ‘¼pt5Ð € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."map[string]*ulimit.Ulimit"pÞgo.weak.type.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovaluetype.string ¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit°Útype.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÀÔtype.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþŽgo.typelink.map[string]*ulimit.Ulimit/map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþLgo.string."*map[string]*ulimit.Ulimit"`V*map[string]*ulimit.Ulimit Lgo.string."*map[string]*ulimit.Ulimit"þÎtype.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  †»Á6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."*map[string]*ulimit.Ulimit"pàgo.weak.type.**map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÌtype.map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþ6go.string."*opts.UlimitOpt"@@*opts.UlimitOpt 6go.string."*opts.UlimitOpt"þdgo.string."func(*opts.UlimitOpt) []*ulimit.Ulimit"pn&func(*opts.UlimitOpt) []*ulimit.Ulimit dgo.string."func(*opts.UlimitOpt) []*ulimit.Ulimit"þâtype.func(*"".UlimitOpt) []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  昩3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."func(*opts.UlimitOpt) []*ulimit.Ulimit"pôgo.weak.type.*func(*"".UlimitOpt) []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalue €âtype.func(*"".UlimitOpt) []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÐâtype.func(*"".UlimitOpt) []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€$type.*"".UlimitOptºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþ^go.string."func(*opts.UlimitOpt, string) error"ph#func(*opts.UlimitOpt, string) error ^go.string."func(*opts.UlimitOpt, string) error"þLtype.func(*"".UlimitOpt, string) error°°U¿p3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*opts.UlimitOpt, string) error"p^go.weak.type.*func(*"".UlimitOpt, string) error€"runtime.zerovalue €Ltype.func(*"".UlimitOpt, string) errorРLtype.func(*"".UlimitOpt, string) error€$type.*"".UlimitOpttype.string type.errorþPgo.string."func(*opts.UlimitOpt) string"`Zfunc(*opts.UlimitOpt) string Pgo.string."func(*opts.UlimitOpt) string"þ>type.func(*"".UlimitOpt) string  _ت3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(*opts.UlimitOpt) string"pPgo.weak.type.*func(*"".UlimitOpt) string€"runtime.zerovalue €>type.func(*"".UlimitOpt) stringÐ>type.func(*"".UlimitOpt) string€$type.*"".UlimitOpttype.stringþ&go.string."GetList"00GetList &go.string."GetList"þFgo.string."func() []*ulimit.Ulimit"PPfunc() []*ulimit.Ulimit Fgo.string."func() []*ulimit.Ulimit"þÈtype.func() []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit:¥x3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."func() []*ulimit.Ulimit"pÚgo.weak.type.*func() []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalue €Ètype.func() []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitЀÈtype.func() []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€ºtype.[]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþ$type.*"".UlimitOptmÝQ6.   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*opts.UlimitOpt"p6go.weak.type.**"".UlimitOpt€"runtime.zerovalue"type."".UlimitOpt` $type.*"".UlimitOptÀð$type.*"".UlimitOptð&go.string."GetList"Ètype.func() []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit âtype.func(*"".UlimitOpt) []*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit°."".(*UlimitOpt).GetListÀ."".(*UlimitOpt).GetListÐgo.string."Set"ð.type.func(string) error€Ltype.func(*"".UlimitOpt, string) error&"".(*UlimitOpt).Set &"".(*UlimitOpt).Set°$go.string."String"Ð$type.func() stringà>type.func(*"".UlimitOpt) stringð,"".(*UlimitOpt).String€,"".(*UlimitOpt).Stringþ4go.string."opts.UlimitOpt"@>opts.UlimitOpt 4go.string."opts.UlimitOpt"þ*go.string."UlimitOpt"@4 UlimitOpt *go.string."UlimitOpt"þ"type."".UlimitOptàà3Ìëe9   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."opts.UlimitOpt"p$type.*"".UlimitOpt€"runtime.zerovalueÀ"type."".UlimitOptÀ$go.string."values"Ð"go.importpath."".àÎtype.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit`"type."".UlimitOpt*go.string."UlimitOpt" "go.importpath."".°à"type."".UlimitOptþ6go.string."**ulimit.Ulimit"@@**ulimit.Ulimit 6go.string."**ulimit.Ulimit"þ¸type.**github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  3Ï*™6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."**ulimit.Ulimit"pÊgo.weak.type.***github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalue¶type.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitþTgo.string."*map.hdr[string]*ulimit.Ulimit"`^*map.hdr[string]*ulimit.Ulimit Tgo.string."*map.hdr[string]*ulimit.Ulimit"þÖtype.*map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit  ´›W«6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*map.hdr[string]*ulimit.Ulimit"pègo.weak.type.**map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÔtype.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitþ*go.string."[]uintptr"@4 []uintptr *go.string."[]uintptr"þtype.[]uintptr  »3À]   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P*go.string."[]uintptr"p.go.weak.type.*[]uintptr€"runtime.zerovaluetype.uintptrþ>go.typelink.[]uintptr/[]uintptrtype.[]uintptrþ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþTgo.string."map.iter[string]*ulimit.Ulimit"`^map.iter[string]*ulimit.Ulimit Tgo.string."map.iter[string]*ulimit.Ulimit"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."t"0$t go.string."t"þgo.string."h"0$h go.string."h"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þÖtype.map.iter[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitððPæŸ (0( à 
runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PTgo.string."map.iter[string]*ulimit.Ulimit"pègo.weak.type.*map.iter[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€"runtime.zerovalueÀÖtype.map.iter[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÀgo.string."key"àtype.*stringgo.string."val"°¸type.**github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimitàgo.string."t"€type.*uint8°go.string."h"ÐÖtype.*map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit€&go.string."buckets" Ütype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.UlimitÐ go.string."bptr"ðÜtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit.Ulimit "go.string."other"Àtype.[4]uintptrþLgo.string."*opts.ValidatorFctListType"`V*opts.ValidatorFctListType Lgo.string."*opts.ValidatorFctListType"þ:type.*"".ValidatorFctListType  D?+°6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."*opts.ValidatorFctListType"pLgo.weak.type.**"".ValidatorFctListType€"runtime.zerovalue8type."".ValidatorFctListTypeþJgo.string."opts.ValidatorFctListType"`Topts.ValidatorFctListType Jgo.string."opts.ValidatorFctListType"þ@go.string."ValidatorFctListType"PJValidatorFctListType @go.string."ValidatorFctListType"þ8type."".ValidatorFctListType€€-W3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."opts.ValidatorFctListType"p:type.*"".ValidatorFctListType€"runtime.zerovalue €8type."".ValidatorFctListTypeÐ8type."".ValidatorFctListType€type.stringtype.[]string type.error`°8type."".ValidatorFctListType°@go.string."ValidatorFctListType"À"go.importpath."".Ѐ8type."".ValidatorFctListTypeþ,go.string."*[8]string"@6 +*[8]string ,go.string."*[8]string"þtype.*[8]string  ­”o6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[8]string"p0go.weak.type.**[8]string€"runtime.zerovaluetype.[8]stringþgo.string."net"0(net go.string."net"þ$go.importpath.net.  go.string."net"þ&go.string."runtime"00runtime &go.string."runtime"þ,go.importpath.runtime.  &go.string."runtime"þ"go.string."bufio"0,bufio "go.string."bufio"þ(go.importpath.bufio.  "go.string."bufio"þ¬go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"À¶Jgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume ¬go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"þ²go.importpath.github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume. J ¬go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"þ´go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"À¾Ngithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit ´go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"þºgo.importpath.github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit. N ´go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"þgo.string."os"0&os go.string."os"þ"go.importpath.os.  go.string."os"þ&go.string."strings"00strings &go.string."strings"þ,go.importpath.strings.  &go.string."strings"þgo.string."fmt"0(fmt go.string."fmt"þ$go.importpath.fmt.  go.string."fmt"þ$go.string."regexp"0.regexp $go.string."regexp"þ*go.importpath.regexp.  $go.string."regexp"þ¶go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"ÀÀOgithub.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers ¶go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"þ¼go.importpath.github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers. 
O ¶go.string."github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"þ go.string."path"0*path go.string."path"þ&go.importpath.path.  go.string."path"þ.type..hash.[0]string·f(type..hash.[0]stringþ$runtime.strhash·fruntime.strhashþ*type..eq.[0]string·f$type..eq.[0]stringþ@"".(*ErrBadEnvVariable).Error·f:"".(*ErrBadEnvVariable).Errorþ(runtime.panicwrap·f"runtime.panicwrapþ:type..hash.[1]interface {}·f4type..hash.[1]interface {}þ.runtime.nilinterhash·f(runtime.nilinterhashþ6type..eq.[1]interface {}·f0type..eq.[1]interface {}þ$runtime.efaceeq·fruntime.efaceeqþ:type..hash.[2]interface {}·f4type..hash.[2]interface {}þ6type..eq.[2]interface {}·f0type..eq.[2]interface {}þ4"".(*IpOpt).DefaultMask·f."".(*IpOpt).DefaultMaskþ*net.IP.DefaultMask·f$net.IP.DefaultMaskþ("".(*IpOpt).Equal·f""".(*IpOpt).Equalþnet.IP.Equal·fnet.IP.Equalþ<"".(*IpOpt).IsGlobalUnicast·f6"".(*IpOpt).IsGlobalUnicastþ2net.IP.IsGlobalUnicast·f,net.IP.IsGlobalUnicastþP"".(*IpOpt).IsInterfaceLocalMulticast·fJ"".(*IpOpt).IsInterfaceLocalMulticastþF"".(*IpOpt).IsLinkLocalMulticast·f@"".(*IpOpt).IsLinkLocalMulticastþ@;eXkP|5lq-Z9;$0n6FXRUckfE7C9Sf% zvW0O#txbqg5?rQb=rq%4=oFZ~Ff%mi3^UZkyaqC@X&8o1DdXu3h2SuRwtv8s%Kg6c zxcA(9&wZ>c*$K&wvv+mR`Mz_`cYfdFyzbs`i`l45-P(QdU~>FD@7n#&I}+?N_#JQT zJNwLKGBhQhP4gTR-IMi1z5M%DrIo0fjU&@@33IlZ%Ooc2qsdG%mDsmyzF}3Sn^v=B zRt|M{4-R(E!xwYXN*u1wEX-Qn*G$jO*Bh-wV_~i}J!>Tf_?Ln1Yby1*W-Bo^Fp%7B z)y##N*6zmiVXJYs3E7&h#`N4|;%$k6nR>;Xf!wVR%v&_`y_9icdah+PYG%bsd@$h< z4&9U(x{XucbaUdaIut8E8&2Gouo{he0}`OMU+9s#y+QM9grJ6I-RVazJ@~(Q-N(>>6WW@so>gqYvn6Bb-#=M^xSkSF?3V+HHXcH+>XSoIlrIb=zeUil!3AI zWkC3ngOl}x$sAVzTSM62Ds4#@jC6`7x2-KVbQ1!al%A<-WK|zRgm`~jXpWxREKP?@9e!R4N=it85^6tTj(xb1ZIZo~vEG_e|2L`)JC+{2_ z!;ikw$=!n`{Mb(EOUnla6TMHqW$fXZ!I4;O44X|s%_g~YY;c4u?2pAQyi!a5M;5O8 z`tHFg();9r!Q{gS2B%BMzdC*vA88jZZ0qjYf~_Sa(Y1*XgPRoqe}fsLgLCytYq13x z?(XU#WC-m?(gO)dtYYF;5IsjfJX+C^W5(uaf=R1b&F1LAn%Qgtq9D%?v@oXsMN=tP z1~s7gJ3X4~FZD34k9F8}wYh$)BbuwRq7ky?dT67$(5km)=MNf&ktOfYw@{g&ZV*pu 
zhTNuaqp3`Ryyx!TOaVzoTdvSzHwhFKeV7c^wP zIdbqwy>SQ#E0&j})~qiaHG$V#tM$qut5KPmw&q$R)*?8@Idewl&N7Vf8*|Mg?3SFY z>#O>9keqF(2{{jlc}5E~ItA44F*G`*5Ti3>TMMuoDx)(F3SBtcz3!T9YeyVSHs%T0 z(Tq?-&4PqY|`gR_hP?(FVVXJWjCxDWDs##*-wCH zMy*P&Y?MJ6Um74o^+azCCKe3rCpHD*YpC~0>c5u$m$bIq-}Qy705C&msq^Z+_Ul(c zyNMF!AYBqN(^QxC?zyKkvwDeGVc` z5cnSD(|PnwBH&N9rW$n^L16cjC2)|MpUtUIulhymm#=sH@*xZSIdJ=)!rlIaW=@LC zM=$^_gY})Z8uQeHKNRX~AE&8q5B^2^YyjF0qn*}tg?rKh)Zlz@(w|cgJt!&yH8z{g z>B+fKAp0iu=s}^$^ORE2-p4xaK!Anpz-pxvg&oq~FH@qRvc9We`8<8*ZSSZ7rG4ru ztbsD=dsksa)a_jbGDWwOad6mf6Fdsy(feow0s}p7_)#RiQsQ>Ke|*P1+W2?$d%D26 zpXwmnj=l5Ek84{V*S1}U9zL)CtL`$s=~TfeTNMk!x=N**PFKqLOtz4(Syr_?S}WFM z4F0Gq{z(tC#0Gl$5*SP{UkS9WrR5ngaK)cIXrUPd z7Z2v}o29Ru=_q~aA1fdK(xIR4A*J5Wf4_9{`#8KPl#YW5-wnpHkJ?dg2m6o_kki44 zFr0S|ZpVmX2(2eC1)2RwX*5zEWREX}41X2u7y!iw(ENi%KYq&~X_aY!OAR|DfCfOwY)8ak z*j<^2OUFAwK`lBB8ze9Tw=<&WUKu$A&u!mryNC!O>Ps-C_|xFEwkbZG5Xfi4 ziDx)`=}>vTbo^J~Lw-zksv53>?l;X3LVzjjU+u%$1z`6It z$LLer^E`l_XP@vq6FBOio(D9Cd!BK{fa7NLJY&N16oltlc%y$HlEHzoYbD%gSU?7QtYoSaBE|H@(`WZQaYz|;GDL)qp$R6xN}PT){ETq zsUnO|Vt5n}RRIPr?NjB+ho#|;hh>h`rNlM3(0Hamc+q&dVbpX^`-S$;ns!dxOh4a@ zE>uSsN|(o;);d4r@}V^|pEA<5YSyY23Z`Wii{*UT%H~tGypgxchCFJ@WwWWAnXMUl z!^qT(R3??F!BR#!Uo2Lt)znL7<-+Adv+L(WaS20AcC|aB_7e1=>9C2m^u|rJ2~`O1 zxmSE>EFT&!#JE0GApaE~dKr9Z#)$BtMehL5?Yh#1wfN9{}MbaQz$LJFH!o)zqMXfdh{3s%Eg-( zI!Y(si1UUM;5Z3>!6hB~1?Gg=mqq%8&gVcNO(*doWX2VIcm|O&j5^`VSZR5G;^MRD zd|`HSFRW?7Le~(Bh)>SIvH(QXQy5^+V?@0O@o(X58!_(xXaqL*;kBee&AM2$mm(9L zTj^+o?&c)y=_ncN+(Dm$gxDTYvIV{-&AIxV0W;&Sy-a_#pFu*VCTtW zS{+wGPdlqeNA;X5fJ>({u(hdN$)$=Jh}X)+e63b4mSOtag3XNCwpTx|zXkV|OzKA=54(Phy(2o^^|NWyJkCDt)pxiA zpD)y^`Mhc5&2l!AHjP}mk~Y&urdZ6UYgXBkBj-5F&I-XxO2#%46Wr`Z9tu`6z`P`S zLl*tG5dA(OdYJ6wDg}zfRSIT8nM%O`c)!8A1Nsy9!9aJ=0V&L(!oHn0)Cb0aTzw{> z5}O=7k+sLABep7V1qxFW=&JFldwM`iPJxzm!3T8Kls@F{ELd7KtYSW!9jzF!nGaWm zs-vJTsa&;IEEm?HvxaPa=Z0~Eb%olyE2=BJb6mR8eWi2-V;okYB;7$Dx#kt3Dq})b zO5%%Od0dJ@?bZz1Eu6Ul3no+srfSglfPSr2d7g=*JndLGr`N`h-m@9h29}??Hp2&~ zO-?Vfa4AW2fLD>e@Imd>`lHr%>=Y8qDXYnh#KT 
z`5YYGpMFU`TsVV5I0z=Zgk(HZv5!1B->`Snzs?Lf zZh*z1`6`_5@b8R2r{6&fj#znpT38a0B)e=O@V|wb*r?}%uekGKiTGRV2{3R5+C3RT zXvh#j6N&^1(Nmf?1waR;70!D4QUFFYn|;1MMqZhW3B|=zQA4kFKE0k!e=x8 zLp1sF^+mT|wp0oIvZeM#C0nlkO4(9gO4)K%Phx!KJ$n1}Q#Ck(0QxAPfG&;@I%xOi zg)FN*yW}$-?h~T{yu#0zuJ#NIj`ZvdJE|SP>PAIYS}Ppo+9&%0h&`wOpe<-T@T^V% zfpd}z@(#I*4xv&W1zPQq&Wg|zJppCfyEtb}aqwrg$CjYyfKLFAkn=ICJv&7^oHIHB zY@`}Xr+HR;%7VjWr99ngCXK~Rp^(XB;Jj_XML>N?qI6=n02#Z3P^G9~KR>Mj-g%4= zI9FoK6o(GaxK>)Fr!cQ?$2y@vm}=G*2(pT5#b?EM|) z%p{zduv2_f^CI1GtLEb~y3Hgcye6r6#_bQ^p}tw#N@IkR9v zh0t6O;+kKlnhd;<&CK-Np`_q6BEG1sf=9H-WS#tePI|m!)|kx9l3GcMUUie z`p7MlF8Y2(Z_rMlhmG((5b}`?g++BTOBt~m!z{&mAexElcnGq%k+A>}O1!Dk(WXit zoz%Ps*Bm?n#aTd|urL+0GX?oSuNmO_Ut*VC8!$#P*2QrxW&koG+co@)bAX7A!UAZq zhHRQ#A~*v!;EcA!IlcFHAGsC2{ucl9l5fI9^?*%)aigj)Lac%;I5_a^xu9~?Dg?Hc znguxC?lKEPltA6CU09*ULZRumy3B?)%O=NZ3`Y-(G4}SdOw)N^((Jn;?=d$$l4Hhx_Q`)PJ0!q2Mjfxa(JY}Yc z`&{Bg^7s3ky$7J4Kbfkt4wOE@M9JM=kow&a*{A~1z%Kg09l~hZ(Ceky#rcFk$|c%BY}&jT_`Zc_H}U1g%8FXl{yF^ zZn0xW`29h_XU5zpVIXn{-hs@ReGy=|u>ZU71pAT`by@43#*!hRT7!T&tYWsC!tC8{m<7oo2JCWFi0bYG z8LbZA%3S1R1z%O9=wkw_)R?h{4ds)QmH9?}GB7jx9qqb5h3ie7K(t!|k(xil(A}cH zXgCRfs55g2#o>xomM2LLSs!RJ=jNN3!z9kxPl0@7*FXb#&dKK{S)#&C3M%9#50~J@ zYmM4W9jCd+6ay`h%sHwlk!NT(>!&*iIoVrzi_*aX!RS>3`E}kfZOp@%uDS) zQF3EpwFe_%v08+povJqc9?GV=j*}~Lauti@hxL~u1m+dFK`=!1MEt~4Iddf)GQ_C_ z(Te&^)!{iti0#t4IddliXB$L~oZ0A7+1~TeSLbncjb7*j8G2IS^2Gj2- zF5p=tXabBp$~8s^{9{)yzk70VkcRR>q^j48^dG=<0lfQ=^Zv!N9sLmIW%-}$e(NI| z`4rg#7+dNH9$EM?78zx8j73O+;%k==U$W4l$r0G{O)suc^Y0pqo~DmtIg~J*uJ+su zmBk2>PF#9=e#G-PQ4Dg`y^eTzInQS9WkETXXu}vTZ_G!UG7H!CED3Vs_a6!q2#UC!GwAzCLfL7DJUv%*+ z=*1CC2@i;Vr|<5BKSF6Z+}kJvRbe-6XawUG2vVGN&A)#1uAjZQ=epaH0fi>|d3oY9 z^h9zbI7uvQuJ$~pL9y5Y&bnw2BG_VSP^v2Qh|{x(fEP(15_z(n`cANJWHVO_I$(6w zKk_8K%>x5&Tw3MJtM^eYkd{{?UhCqINn8io!>Li|_p$=TLTosJBCfn!&?h4{f?;7i zPM}M+5hW!8Aaif9YGxLteoC$0B=($a7O zcdu-bLuJEE^82w+ysl?A5>+6WcvKO)AHh~)%))^ZuN(Pjvpum=JHO9k=R><3WVplv1+g;e-|7-42w9;_ z;@aa+xSc|14M(Cd;x|;Wu!SIe#V@S(_>5loPEUoQcKL;HYRQ@jzYtEKC{FSV;j~zo 
ztkmJTL&mLbib(Aj!b``x`sbwBPQai~!(emjuoF=J$AB$G;h`l5+5Au<9*4V(Qz*dz z*csdAhvp- zM#wGDJM?-Lzq|5qcNM||)tK!D_7I+exc!ulz{Bg)v){fRoDsf)ZbL5^_0UV)MPWEl z*_c(WP}xvtgzH|?^pRUmp}q&l21`0o$(InErVLD6qLYuFiAwjDBwmjphK6DOLydrn zarhB8AvdulNu8O)L*5Xg&d8#e9G#a?=OFaWDJwQ$KL3S<&s+|`07~g4?xNx1zH!T; z@uWr(1ik9g(AHu#HfR`jwj z&~WA9oh&<3S2}Yb@{S(oSBM#k?2Z*%#heVBc@QxqD=0ms}#GsWPSzq*J{ z+~U%nXfUTiR`NDld}9vdEHZ6}nQk`5WldnTE#F@C-s!7PzWUqyx7e5hQ4!{as$&1uMPfL?3QW_si`x zB~k0k8O9DQk8f|nIYRafj~4j5F6DQ>x7O zLElRuLzH~u#D(#CiiL_0jK|H+qQ`ACqGtdF=`wBigPJM0H`K(lL&Dh5mb&nL6C2}# zx0)biujo3lZ!0iA2X#!|2<413DzOpF3-^w5AA9(9pS~Ob7JwFa(Ry*;xN&Z5xly>@ zp@6GThH*d|)(18;!!t2J`mx6_>BXKZd}j_105?WR;4p_};wTH_g3xk&Gbi&5bDsLl zM@L_J!yF+e!j^9o!yM7ts7OQ{^#RP-B+9XgC=$CSXl%kcu!#so+$4Bl$H@Mgy>UAr zDi*Kfra`uf%FB(tS~!54c<-=}o1pS?6LrSnjW1$%HCV2jph9l8Lh#&Vr2_`q9U@|X z%R_H9*CS#Frt}hb{^w=8CBT(mwg}vjuO|W*GLFK%9D&<3XIO+SIYog?5q-z?%`6^Q z#na3fs^e+qq-7jAs60a>>Tm-*(Wjr&oru`W)_?b+-L_;qqcB^Exn$?JumgQf4sH+;x^d9t)p1=8f&Mep>FyV#U|?u znFMy8Nd3AKqPd`bt2RI%Or z#?3XhBLUjWd);was5D~qdz_td?0}UB=OccTkAY!ANcau>O9U0i18(q+T@FHmnaxP% zZd82h^v7mi+^A?%4ip%k@nw`uVqhcoCPa62L$|sF!XuPQ36D?;oY!*T5txBm<0~pvZbL815 z?M}uO&M(^TH-MZ4cX-m#&Ph4gZ^0BldLKYlmRVIYFlXLfYPM4g_1K3u%{XLuWSV{m3}CQ5C5?j@U_8g;F84EzyMnB zuR9^-jSeLnQ3cRSj`oznX(w2B8jYg^&H9OF$KTzxyIu&S^u+ zmVN<)u9kWzacw4`P>GVRk0Pbi628zGK&<%v2U@bQwaz&Yoh}Dlyf|4OP0v*SAH4k` Awg3PC literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user.a b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user.a new file mode 100644 index 0000000000000000000000000000000000000000..7562c1c227a779cb80498e4ffd26756e278be1d8 GIT binary patch literal 125556 zcmeFa3!Ge4l`nkibf?pKkZv(TM8u+PunD2N-Vb1~kpP8eG|FS5pybr4Q%OswtLd(W zpfEJ1(X^dNH}BT;KpDU*tBsKonI{fX$h3rEKsduZGl zuUhs^ix>CyE*`~=wZVx_j5}kC7Y%J1t&CNpV-q9Qp-oP-N8a?fDaDEMP$k;Km)cur zY^*Xi9_{g;o8T{|*9F-oIyN>T3L;ziZlMjcMR7K4tWg$Yd9N2Bky 
zFl5o*MbWj^__+IU?NBK?G*Z1NdZl~g*cgn;T&QRzuiogOdm<@%$Kplhi4mJyyKG`? z%o(Y2qkZQ^``+&MVO8|)qHpJE+1FGmH%*L++%y(ip%HZP@kf)Y_8w>4zA&~q*3)ur z=zkwqHdLZ|FsRU??krt7R+$*3CYw!@J3DKJ#;eimKNvm-qRQGN@Nlx; z=b>qZY9cn&PegdxE1fFA#jT~aR&`@kbYBdz)^`BXm{Xk?8;SM|I8}RKlneB%ijE8o z6FA6e1zPmK&#_9E4Oa*Vu@KH!*|HrJ=U1X~2H5j1ic(Ij8xI#@UHVuT8> z0xVG~3P?!ufS%iN*=dov)_$$~8UUgYPsKb2%ZpPRs|)(7sur5pQ1^Y-2q=?8TDPvX z*4Vfsmf?z_VMoC8C>ErK7!1?be~`e1@JAn=Z~A>xf2{MY^-cAQz=e8Nn{sEYLI1*r ziJ<~QRHgc0Y3Y0V%bfoBqp2v>pwSj?t5~TFp9lJ^lZ)W_32EPLd(1zUSF(8qzZMob z9AkMwI9W^`1E_xp+H$l~6LsjQmV+yAvK}pV;HcK)sM>egz~!4Ao4Jw83F_J6)gY(i zAW+OKHLmC>3i+z*T@)$thbC!_{$u@s(6WRU(oFouvvNe>5!_ zq)6F7977~zWCAaOuaW>346>yf5#U2S_&D^EdZ^O_t)GuSn(-vH-qTAsIhIab_RYg$ZEN}nQLNu7MUDk$j8ayQly(%&3M1H7*b@v zb*hvw`>oEQ3jS1(vWz8PN)w{1xFZR*-!cxHX8Fr)Kv$}W1n(5WO>k3<5HVV_%^scQIsN$`tMOD0g7e5xZ@8R|e z(=QgLe<-S*-pOsQnHKHs5O42#lCDn_rdRbAwj4IPruL&Rh3y}TUj5+}oa6oPFWf!X zb@!k1>o;?cr_byy%-nzuPTznI;;uk<-J-(nH}tM3Y)5U}bm33&-^{MwLp0J_rkN@- zVT_|R#wd+(Gu6{qnBLVpNm()A7!5f0iVb+n|24YqzL(nCE=J|;GltP!c)AVgSfR^0 z<+YoxJWlFdA9u6$C7`jar}?L~rv=W zo_F}jk^aSD21DzfF+YI%g*k76`mp|f+ zRgkhXLSY&#YWQi+d7vS#+k&r`hmJ6)j4B$zoVUsoTcicZ_ld|S0Uj!PUu6YH3?4ec%=Sf zL1^bc-x1pR{YVP!{Kpnj4{z8|=@;J*{XHX<=q9UbZv-ug;wy1%$Qg%V9~EpZT87Ng zgRUG{NR=)p`gA#cbeHk&-pl@zC<1UYmY;ZZ{fW3gDyFw7Oaig-`5hvCbRZI&Tpd_B^*inMW$ zX)fnRnEV^vXp8v#xbb4d6fkuC2UYt^af58eL*6o{;Q;~CTJ)d5d@;2Bi#562S5 z{z$vtn0OsGy4$_uqzbX-5c3S9y`Sr2xgM`+9?pxBL~ne^?AhNo8+oP;v-kRLv%8}! 
z()B&@_{ZXM5vMKZzg9K-|c7Z&til`{tw`)I54R6ASgQhh_x5wy&&N;;GE!|;sGpXb74F4 z_#YLfe<5=aR{(DrEr{ejT)B^rQv(I?5vsg!*OSzZCzc&Q;(n$Qj6mThhDxaYi!RZZ zS}T70EZ&AtkzL_L|3G9fNI{1W)ARL+tZ-*82&H1c8!t*NH+%NBeIoKu+d{&Fh_U#6 zSD70ObFo1e9mLaCGLv^4%Pu*&q?L)sve{&;oGq1eR>DdrOFD>K^Rxwk~ zmy^jvGFD96Stl9GBrrE842YBPT2ne2#w?a$s?7u8p~9;HNYfykWET1ngl|;$0$?xT z_tWQ-_#&jfn;^uxFt6m(aIjZ7_ViMtKFFtr3G%5?KtAEj@qzpYBE+ystRXu5_@l8{ z9Cd$};odLx8y^dY?r(=DO`6057kdFeo-%$EG2h7?!2R&PB07OD?9uAjMbS%R(Un4k zh~DAjj`2ho&LoTjK{yjPqCT8C4dW$JNcvVF)M=_45K;`_(|+)YXWEFb;HzNi`l~uV z5g~-y*uK$&P%+2J=j@_m=iX9ixlANbZN7;C#=?*&naE^Qb|Rh6 zB~$To*~#Sc$*g0=OF0L+k3{jcibN!M2|03u?RRkymsa2l$bNBm*v4OkK;;M1?19+q#eRMKO#=W58eznTF zAhimtD64zdl2nc4JzTkul^>wW!OE#p2*cEkd461mP!*vw*VQVpqoQ`dS_KX^cuQ)P z5L!ZNm5`*_$X;lyd}@_Mx*nO8T4gc_t4OWl!z$9Nn9!@3Qm?Y~OOX)8N(S1(WDc6b zl9L6%X5;a4wwNiE(#dkx$;BKU%AI&JnU0qeIT$aqb|#ifC+%X=vh0`@bD%`TJe)9! z6%t{^cc29FNs#83yt>0@{Xo7OBP1Z7!&L)v17ZyQ2*^9ty?|N$OK>k`nf7Q_Zh+!2 zK$}HmV z-auqZ5i-qRqmm2|B$Y;`Rf&%BGpqEeR6Gdu-7o~o#!6*y9{5kkGo>Q*DQP>GDmqRs zlPSVxm3^%uP!zM~8t2Ic$1v6##&yckVdi{txacg*5c?P=HwQSD0G5Czt9zq#^Mu(! 
z+HhFLT=TW=>xvwM_}8u^iD-bylLQHmI=RD)8T_x8k~Aw~DwWRUEV9YK>R7@m!O6LIn(xmujuLS*LE+^G%RH%k!K97n^SK!s#Pt8#v(H z&>N$>DBa}>GYcqF{GST>=c}9p?l?CdCL^DY+!>jSq#$%0fY7n>67vDra+Voa zo41><^WbB)TrOsF#cV8=&Zo`7}P|j6;%E{tj4}k&x9VOwU={!j|CE@fcOHb0xn(LmR zo8b_6;*-r(ydl{Hp1e|I^TLy4v*tME6Jy}sHSVl|cNIKKPKDRvshVfWRPX4KBS&-< z$dBO}-AxLI8DTmrY13w)u!3URD36EDmRK~g5i>*xNc4F>D|i@dwM77Iha9bWQyV_0qyu?<~NWia83Rh?n&ldH_3of6nk-OrvM)3v0E2GT%k3U#f3&!p3(Ii|6u~&% z?CkMR`w}!P=C6xgy^qP}a;0aX@dZ9PZUF=#SFVm5OzxUfCM>6h5 zN37XL$7lHy;tWlEyO5-|IpGH;9I?jxuO1y9A}{&-;W%y>t~)d~FxgIZqcy$}Y5Nz@ zN5-jag<46jAcpZ7_851wW8Q_4g8*0G_gQ-tH~5PWcXt2N61U=h_ST%T8}jY4?;fY-a*Q}2CC z_?t}xf=_i(TO(TMU1k+^He**>bv#-b8NMw#TER+Trp}H=t+5Rg zo8VF$rD1-)n<}8sKQYq&p6Jkcbfi*^TB^+1l<(W!)ci^{ZY-s3RYeg}eShk8RE6*mGNv^xF&Hqa{tVmu%W{4_DFb%6I?j*c;T;|Z8wZQ?{{RH+~~#UuDja492w(} z+Dy?gay^9Z&X*xY*4rM2W}YIkm)Y-6 znDJtGYAR3m=LXHEa+rxm=~?5(#xs?I9MU95$`wD|+3v(=r9)=!fEg5N*jC_2NRjX( z)6|Fvy&I+737$dF+acqgHf`F-xDFt%jvMzx{Qafux4>+1E-xqV0aTmQ?=Y4jahRnz zul}yjJZHB34WDa|%(m}~@MRkr1a~YlM&5ma*#Qep4yuGx5}j}RlgP5ZH=efi@{YH? 
zZo#RG7F@Ty!@T+I1wD(}uRmwOxy#z4?SBzDd*PC0hIt7l`=27_&E~a{3)@$>t?77I z9kg(HlG;yYZr3$|e)(Sh{Fs%H>=+WrME8bIEKzR!)~aT0$rv5-BU2 z1vO5_tZXKkOpysP9?vFnNyi2@80O=zVl5$gFW--gwLSTX&`0z#aaesoWamVR_U$9Lj>pSpKtNg>b8;yp=AN-iXk5c1#( z+5s0OQDZF$P9!ZAs_&pmn>GwNVU8;m61(ehMLmb>+%PE_!P&Q;f@kBs-pz=APX!^B zAPa3noZ&pAxIEZ3HN$Dw(C)-FB(X1r40jErSQ5gXr@9I=w^Qe72O=cfD2)Ig4{PY- z;dT=GzAELHhkGX}H$;9R!t4<{DFM>_DK?M)oUf4 z^K2fx3<9pV{F$!ovXaOv~MNB)p`A_AAS7MGRE-Z z@V$5Uw-XObffWWSS$QAc&v?5XnRqqeMYWGK+A{Y&wmH;kWq-!6>-YMJ9d?cT@fqGt z7o?4Sgi}JMW^~pxjBYRE$*>j7*E?U9r172x+FW?<4214C1CiEUqqO~IU}(Npl@V=i zfZ2tt(f&PcMA1jt3go5emzX`X=9eR%Gb6L+>*VgThs5LCKL1l4&!4x z&U$sTQQeHHo6UTqzQ2g%tz{}qcA#!blN2q0nBfZz|oc56o5SY3C!>#rv9wB1q)Rg0wEpjru0VA0YVlC>0qsp4v2%&FI4;hFQtZw!BV3x zPrMHgEFBDnO9uz3AW#KaNO9qj4*r(MT1f8rF0bQmWXvbNeNYw z5{A`we~1b|tgrS?a@Ma(Y2aZg9Xu=rf;~cY0%>6%=evdkgq?CwYe>%67Bn&?7GPf0 zZMd|M)TITUnG;f4m|`tT7;*W2BbnjX!7{@qS#44;Gd#fW8_NtrG6<3xzTZe@m}K|M zQ@s*FHbJg4KKAX)5^pFpWXYD{XXr#}$)9@uLPOG6A0RX&j98t}p!rfq!#Aim5H0pX zw0OwGtSHgq>*De453y)5*>3dg(L{@bnrQLfwv7RzMZ$^2b6FeTfAA|U#jK2-OXX}h z!o|vE+lKFl=2P<85iLT{Vy?$99`4b;0jGL5L$RxW29kxP6LJVySi0#70^Wx!{pdcX z-RHFXLG8Yv?#ViHyCO**jcb9Guni)m(3cH9u*f>oN)JF6sRsq9$fj5GCdER{79J4?F0gR*_5lI}6kQ zQrL0?mY{R+&8-{@!8t&_!8u6H-!*j@pJl{4sFV=S@Nh^zC_bW0ETYKQsmPZ0nhIUx zog|$-J-EU!BB)K8MY^W$k|lD~5)Xum&;t*II2JwdK=2@p3N%>s^aik=y1`NDE&qz{ zM^Uq|C+zCY(L8`l4f1uHh%bOjz`CUNpNu7)FdKDE-M4^-O#&rWm3SQe=zhPrMMZ1y zh0Jy|hx1;#zA*D%u-cu%Oojzz%h=2XTr&TB*WGWWd^Bjmjzt2ysL4#YS3^VPHOtM< zp!AIYwXp6{UkF;LA9Te;vQ>vT?n`)Az4(~m5ZVJC@?+F%{HU@yzN2OcR<1*53fn8u z!uE?Xl3$RO!?jkj$^>3^)ETj3O`1RPOf?FLMAXQJPYH`;OaTh5P+|iO>;UBlVs=ff zBh;^3;@h#r(=WSF`~McE_ZPOzbyLyZ+!Q^cCsVz%?jz7)N*WO8F55xIE4m=vGE49U!rpue-#?en4Ac zJ79s5q+dE4fv5NMc5^7BAP3}gP%mJwFg$k?SiS6w7dRfglbF!BwtM*MHJB0pobzsk;8**I40J&RWfVdv~=*zGS zw;lWj44Kml5B>>F!8FDRGxaHjnX5okFWY#q9dD;2FT4nY=T2m&`Q|wSZzGmZ&!SGa zD9C3qXUIp-pkaY*Uou}b|M&mm+NTeWYximG{x>gi7r%$ghg{u4Q3NPf0?+&bX(+)k@RHnZr*+T#S&=z93rzBD&EQ; zBE7Zt$ZrPGm&!=n#h)4d%7&<0a1jv%86IKtZ(#juP|tRfl2&fSjJeURU{mC>fg^%d 
z7_+%j^r>&9oQ3oH=?1NSiVeU^p`y9`Jy+m)&iDK*p1X}&`p?-i?3y)s&u7$L&~%-5zLo?wuI}(2VZjB_*{o< zND>D{AblBQs7G|DVZL0}xTm9`3qUSo99hsfN7{I?Ne&Eqfo?hXb~Xn`g+|G6Q)p6u zjN;7n2@86b0wUK;=-#HX1P2~-y)PZ}pNV}(=<$~IS^I*Ust1R;%FgWtGOR%hI>PZDuMc6&#NA2}Zl9uXo-X^Ih zm5HkHxwbk8C%*(l5D4jq7{c+^U{RaE@j3%U#yn?7=QktwbONrj(F@H2v>)# zpk?d?P2{a|DN!nCU_Ha8={$lqB0`<61uMY-XXj$2WI1J(V>#@5OIc1amd&RTuPd29 zf&3tPuu5!7<_*>--5^9v=96v^L7|IZZEg@0J+KD5B$qU@XuPVuAmWK^GlNDvk!fVm zh$AA!KzM{0pGBh=<2tKA<^_s7i#qG=Cc0-_#}g^F5- z55z1k>;u8ZB$#vda=P3Rr&*$!_&~7LXGZ!!uw4ufF#7Hiwy0V~(gepgWa$I31WHh4 z*@bO?o#q4a6BujB$fQhN?3wTwO)`vN%GgT5L&#<*d?5amifjq^u! zz9KIMG%E~z((1@YH8iUn1-MyqoUI!im1ZA^4%DoCAPk<^ogx|<7NVP;F-dG^Q(D`r!kymSAZT!=)eHZxy!~{u;O%vJb?&IB(4d;w8$81wKSq%?Dx;74p3=y zkS3OV8>mLQ&dT>xE2ZzIEBiW(&d=9@PSRk%hbXyI!29GHa)}|Y1wj33xPZcCVLg^u z0rvJ?!WzqPQDNKYQ&RlpmzAzmFeUH};Fd(o3mEJfyK-v_r=J!J4sO`dg{x+9`Z|(0Q&4Oi1)W4hnG2 zg93chpbq{=vf{TL{N(>-Gx~q0@z+`U&muPfVd?)OdqeD!;S8`}n#P9-5_Rrk=2n}q zUA_A$9hmwouJoh(1L|I~7&QMY@;7KdhjJX;uHC;!yYHs^g$I8DT+3xSR zluyB^9}+S6zSEVZd@Gyv>%M=mNuMqIfu{2BvPHjc<@T_>zM-vr4s)LISUBV9T(~(r z&fC{R4C(Q7hODP`UH4VbhwL;rmdCMw9h=X?jkWP~qLHyYo{ky!ENH<99>-4gR*dEG z6ingD=30XX@93~4{l)fBQ+GUt!M8WErpHqhsZ*M=Qw>bnaYR;vBHrs<LWgn2x^jS)nE6T z+37Xes^Mu-2Vufk4Lf^aN5mM8zpkNyxq7bSn~|?~MCLj^v4AhnE#ylA=IY5VW8-6H zy}7!=*6LN}i)Q;uSgZfZYpp)tFh3Att-hvxPsF^4|GV5gA9m{Zn=g3n)CgQ$D(12O zK4r(S7cYfaosNx=-{riOL+E=??A25Whpc6ai1lfel6K5VWNoLMwbH3;T`lUfC6>EdE{jam>2} z`0zpzsS8_A*ORpWi>|5v&28zM^gqjQl)gZGS3IU#r!mgS2aINzzTjwsE1dtM;*$r0 z^H$D4J4@i;yMrHaWH3)3eUU>)FiepFc&DF=ytGGpwyT5bcJ|J4K@kLOhbSy#Hf!1f z_m;kS)OSElt$}e;iz^PEPa)1!I(-f{-0sgKLfL~qrVmrIv8BIc16UiCS4x7A*VV|W{Hr<-aaT)?;4&6YM% zYq*1zd8(12{3pR%|6UFW7jSP2TmQZg8ZO|wT-vvh56$;^vf>)7Zzsse=A~Sd$c=c0}cd!F*NKAt{j`3%Nhig|jaziw3NEXNB!!z%*d zG!vNd1xH^g9I$;E;%|BMN6e@w&o&vv*Bg&04yQVA{wu2a(~+u zX4>pB7cHj0=He&K1-%^$7A!mEE%>{_Fk{Az?Z0pSRiy1=Gv1kvye)EC+u7#pA}c&z zXr%;>{*Bw^d@7cWmE-wzDVDeGQaWX&bID}Mb2LFBW?T6f4!0|(V>x8b#50Ku&InGV 
ziU`J&D+19^5bj<_p=K{MjE`ODu|RAjYJSnBZrEoLp=uqrQ{^3u-_A~HuRna8f;744O_I41@ePQCwA*JR1kGiV2$$cTDvSc;ue*r@& z%$!%4#@5Z7(VEOAkr^FiPBy7`hPu6IDRiaMv@x1gV%-R7=oG4alDJ zF-s(VO>Y!>VXZTRe9yqL+8ydJ%9991(?E5Q2pcUxElMIVRo_PzDw2UoAd1nO9Pitu zH(OrDNO!V8*fsTI>hhMa@~$R;#NW~5%m>-bGf|*&g9SSNV3iQGsZn$v9g$n6OF!5#P@g*`$RZv-coP;V5Rd_%yAS`h^=+XTGS5VWve zN&NZ*qr)^`?t)Us)wa%1c-qQCK~@wKB6EW(72GwcR(5ndtMX+`&sw)yfHRDvkCq@t zRB;_m3iomwQ%f2YQA$`qiHJS{_4HSWz_LZ{zjY#kh}Eb5jP9O(9Mfw|zKmu63N%cd z^6>}hj>yF{FoqKQia;~cc#CvgbEBIQyi*8aT!j? z10_jXEhGc#K(T{Jt@M8I#h%zMUo&#E>fz~c@K^+apvD9f+$LG{&R2@${?2LRsic7r ziVwl_Za=N|co)otNAB$U)cs^O)M@RElrzEb3GZuAhMoHYMcq$2e8PR#6rXY zAzl+ z-*?AN#Qv)jh3zo2b4kRN{ysxt0dT;tkaV9)`k_eTt`Q993Nz!N0q;V;Z;BT3`^KY% z?Poq!m_dw3E^>Zh%j1Ixb&jTP|BnaP!|GXs@F%Cu7e1j3r9GJ2_}sy7lXX_wPVd%o z%UAyZd#PK&!9Vjn-Bx>25=+ieHXb`3F^4TkbaL_N z6&GFpP4Ngo`>#d~k06RjJW6Am`~2}J{@sJ`|35V#`O2?x-*)h8zpA}@FWp~m9E{U- z;lTsHtR?+-?fx#hHx91DWoN|I>cz;Q*(gZpB=5HOs1I;f1Bdr>^*{PP_lJY>KNX&T zFCU^L4AQ!CMD5lOY~*L5g?`IA$G}2g;643;o3Yh)-Y>9Xv2if;1HAuVObFtTVE~Ud8d&4>dRHA7*cWHJS&@wIfz> z*ojsvIB|TW6eSg1^qgBR#J?VbBewW2FK?>$Up_Wg87t%6vusmuJRhGtgWVOh<;kGr zfpFWpSf0nRE#5GKy=9HhV%vMj9Pi?7?>?gNQ{MINnI|#F``;3Z0Sa2TaYRv1Suw|l zIiVrIZabUCUJRdO*0GLrXb|6YPxesHywm+teis}p|Gr+zADs3rPIG}-9>{R-+o*HC znflt>s2a~`#Z2w)YdTYp^ihRZG#RY~rw7lgAK%pZ+#{m3V`J~LMm9Krln3y#+aR5> z_VNJd6t;N|`i_hrUqv;#^i>ax^2G~6JFn>L=lesQOP&i)`oREI96alfF#`)u`(cB$ zmm8#gCLZ424>--g$`7OhR{1+AswQBSA4`W$l){In(p2H<@Ku(8Of=)m8LG|;#5;2V zi~J?LtXt&S$lgxF81>jw64-=sr?jU4B!rWdF9!vI2=TE=;53K^iE$(b9BbgKKZaZ@ z*-dTb_3;KiCW%DNGsbr^fwx8+)Z9Y5*s$74E>KsucZr6*qvIc%I}#-l2p7K4EQtXl z?8by$jeazGMU?Depw$;eFO5Z|9~$oC>OSf_P$Q9`b!j@g30i|zdPS?yw6-{{K=WKQ z|7t}W&RLw+poQMW;mWnp9BqBhbn9E)%*9j2k+znaO&HH8w;w?ky?_?40l40v*C3gQ z(WL3L)mur7W-AQ8Vl+Xz&y=q$L8pi1_|gd`rTbPx)QY7Vb)sPym_&?dfNZ4UYR3>S zrp+w!Djoyi#|TtI=OoU-b;9Q)j`%tV;L^gR#3>9$*vi1LSrkpn#*v0GcH+4_d9}6D^IXn&u|v`Wy-~JzpUcA7rDGe6 zXZ-Dv^l>inHA?-FKecABKcXTeyR1ps>yIq(KmZx+h3scM)L!q9r%EAOkq$T15cOi5 
zydeECm1yf8B^nPc3>qONk64==ijCs|HsRhkKlgyzh}YIdo6&p`R4C?}KgqE<2YlE( zXK&{>7eCYq!QfLSUmk7e%lF#&@>v)s7sQoN6YMyOg)br-z1(%J!DSy`4=5I|TEHm5N z-vdR~M`}w+4YK`d($lG1hHu(xD%d5x_1veDh2dFBJTC7ai zHNAM4`cjmaCQ{L;G{p;jwQGup)eY)&I-#hQ`9*Ko2!|`pGg;_X@Q7eq#>+b)x2=zc|jx6$sQO}_YI0KC5-JoV_rbsB>5AcGLwG`9IQ>0FP zLR0AAzS^|a94cuN&<9m+bhhI)Q-o-_!!kAwcdMi-!=6l=RtH@?t$F= zL?%J`{H#nrB%cp(-@n@XJg%CP2R({CqQi8jjQ7&XL%|F@P$?BgDqcCwL;`5s&q*~C z;n9E)I!?n!}yp_lWTLI173DDKQ}+KX8u%t`JBgoyanRDZ?p4 zP8dw}{7Uv5o$BUi*>fC8eD`Yd7a%&oUP-Wn(b&j)qI->Nl&ftyK8F*qIy z7V;%3fsREfUn8#b2F@UDXoC5m_*QxgQn-V!ffVErwa^INF{l%q3H?NQ^=x||!OUH- zd%{+I&*cn2bjUzsu#j$g?1p$HO_eeqE9|B4@bZ3!ko*XODKN6@7{6^9ZMm(%vrA{# zQ>SZ+QTDG1Y2lv3%LOkbxqKVx3Ur@jX!vUHR`lHEBhtW5t3)Pq01G8k50qD#c*DM% zT~oiLIsjpuiJTvX=@_G(GF-T96Y1P$_)h3i?wT~RZ{`PSWS``RR(C?<6~R_3l2gs|fd!}9W4+u&0Y$d*G7-_;G6yGt#}8gfyP_ip zW8@}*&i3&d%K=}Z%3mCH(~x@4E`z51`^%- z$!nkn#_laqW%arSdhjDWc*$?HttBiNZf@PAyuLGR*X>UKj*(fm*?M&7!Y=elHn=vl z3*Ei~SXNTZ<9t8dQG^+ii%+P8^>tcBHt+4C82UK0Vtr z!NL@^!y9#7k1DT>6yN47p0Z+bzQz{ZhKuuU-p5_m7RP58wcJllu}!DPq1H@U$eg`J?F|Uc2tXjy1gy*nFI%GHW{oxy>_prs* zHRn4wqphKl4XE$?ym30bBfGo-&d~|Www>|u#EoTZyoxhL@ukiCsKcxH2^)P~ix^cg zK0dTz1X^ihj_tjE>(${m2UxEj4729$Y;Mhc;4M^Pa~tJ&5Bl^Ow(xp!?rUe$0*5T- zo*t?_d;%ulN`-?b;5Tor4b^qt*vgh$*QTj^vur!3v*X!d&y4%eY21zdD5{MC>oA0n z-$Nrql&db+J^BP-kIkn@@A|tr#dk< zg2bN%S$nsx3NQqIo-Ml})qI9+yDs2dRjpX4oXojK&22@4^Qi$EU)}z<`}uYK{oD$M=*f+87V53KOj!nklI{aj;foU@dvj*b8>kH?rIfj~> z+QQfxp=@CdO)wnyzXe-ZzKP9;3=?K!lP*jXpIam~-lVr@6`#*)1*^o1i^8lQiG0HN zQG?XDaZjgLE$*4SL>|Eq7Bvbykbn^))b^5q-JwAuS#6rG3l0)!B^N%R;lJ z8;^X+DdV1IH6-b@jTX#m2^8mHW*AgPiDQ~ z6(aN2hM|)2xy2f7AY&L_j?>v3I>A(Sw--A-tx=?k4P*)yGwu=~GJ};X6C)*Bb`c(@ zSg8yfi}BuKGoojq+2}V^`hAFR4=oA>e%&}d8z)XwBv5(gWjvHaDTQb-7&=PRD74k^-m)iO9 zXAAlA58~-MemD?$XbGK=J-Ncz9oe^F6dQX_H6JnC-+JoCQ}W%XnoqWMoD0L5xxm~W z>G%WlJ%}H@VA&G$0_6Hfv*YxRiw07ei*Uxo*CLVLj_VdKT(tIWZ|qzc8CkHjZQ+u{ z>$(i{c4Ioy{&BLKU2XQ6F`TvQuqPf~EnK=I9rpWqpJ{?(c zU*wdoMO~gf!#LADoh_#AR4HG=@!aWjrfgy3a2lt@7mLY6Nw>5m^QC+)m59e-Uc+wT 
zQr0S^;|a^o$5K|@N!buqP7q6*?qUm)I0>D=FC#VEn(ttE>_>P9G^f|lHpMlh^x4>1 z$RC`zgiK3)h3Vz2K#H*zY0Y-%4%SRR4fU_PvrK6Ps;4|P)jNwxir}F4+!OM_c;TNr z+^>3;(p}+nQ)SEhTcRD zCIIllgBcIklGz;*&PT7QL`GjnKpZg^7tb<_bjthH<^>+uNg;}iolWO6v2-b8Ws<3E z#xACd*?cKePB@TLw6lLpnUs@`TPZtLEIXE!OU0a2E|$;49XnGbi!Na2DRY3@A0M#C zN3F53fm_E1HdF@2$LxU(L)DEF#eTc8X<%riJUp?vR2--}!_I~=t2}UJrR0o`58QZb zW$Y#tv>ktuGLEhLh=sZ?C{=8P%eRMzoRR7P1bb%;R#v@sks%A)2TMQ*S;A)eV5X>cK-Im`c!K^$UU>I1j=etIiG1y zCUUW&gCIZje+l7!irG{;mn}Pp0~9agc;*vy+5s=WL!_O=G`UDi566tOJA#ll9fq{< zlJFrd)Etsa0cpiIgb!<%J{}1{TTtqFJZB;JQ#zMP#AA6YX<1GghY!WmIJ?_J@?WcH zi;s)J5VS2Y+9G=BO9okgYG~WiDk!{N`n^aU;?8H{U#o}*DkK?pXG30no7`)Au`%%n+(*24FL zr2+1^J_-<)_}mMhT-<_0@Of6_2T26jxCM=oUyt;BTbLece*|3KSnLxavVLmhORv*~ zcw7qyd@6?d@V*}NjLAs?cVktc(=W+{eMKDf-pWEX)i{+0ttlK zR6d!HW#idQrWALaTrmmh0*A8YvUwcLrVAvHJQC@6+Rh*fPP~{aJEfSD$$(X6i!YsNy*-8Iu=mm-NCEZKx9})XzE8zj7*+*P7>zaCw-_`=RttZpzeT*_|D89yO zSN83?`$2v!L|+Zg>!seVsXHmF*trgD`T!Z>_HtwCzmllrxahQD823@jK+klU^ zM7v@%2|4$3p>8~dE#K&dMbquqwv8|_szItj!_>JRua~+Mgca9;-xM#0ep7rWkb-N5ML^^I1N(cIN8Lb z&hV9v;b;?|3c#C#cf)vUApOa??39csYu=92*Fw!9`%Yh~_yVhV`=f|uOA<%Q?Ac-d zugEE(xo^!e6qto%Xy%JyaEbCJ%H`EIkj?F~!2mn?)r&A7dNDDsU+`v*K9 z7rAUQS1Olt# zvRRA2=}wFYq52mcbl9`-p!)2B8VU9312z|CJ^<#w1c$uiaua+75Din14<=K6l5*{H zF?IV3L^41MKtae^OdJG-!h?!}e2dDgEzCUJJIPtAL=tKNH4B1{Y2BRiT_MRp%x*UwPnsrl-?kS67_%tw9QYz1)>}8T9IkKA1c#)foYFlrrE`w3&C{f z5M-L=Uc5~KB)ypCOHUxep$D&h=@cW;i`~8qIODeg;mK1B#*qC+$s}1;8Z@5F){`Zv zp7oKld6ADk^Nd50Kaac^i5!aDB`%#H9Q#e9_xlUZGOsnSFi(kG7kSI#QxB5+&+10!M9XcEv`p(lbSrUBtPfK zSruJD$H}!FcM$z*&i!uuGp#S^YTz7-RWsFE!q@A6!ny5YR{~?LZlmxYgKWo_@Kmad zIFCPy64lF{_GRM#pa(uR5EerHV5i9jR2Kqhpm@hDuIU#A*#Vi)1S^j^&6JUl|+k zf46fh=I=p{UqXjeW4ryeBjXdJqm{9$Q$o^DID13isn4|0^atAvp^j&EhVgK{f1Pmu+Sc5^?tb>K(`%dKLLP@OQQIsF;SC4%MUKO~UR!``VLOakh><}? 
z#OS7p@oKc_M2AMAV-&O?dQRzrb4op!u^sH7=XD_Z68q)(;u-v$GdAqSODGorsI1Xl z=4Ghq+BV;C<9-qcJXD(c9VAmZFwo{61u$cP2Zh?3lQ{gL0T;)y4|j&LcpUoh9#75Z zns1&o-*O}pnKb{VjV}w0Zga#eM4mH^=-!TzKk6tfd(!-5N7r-*WcJgH-!p#@!PdFR zRUQT$i^tRDTq&I_X47^&lh345DI6_tm10TgI7)e4?@_WcsZuPLh&kD8xo9V{MIbB6j6F@ zX1tKfyRkf!$UITGsmn-XdVzDPz?OEfINF}WrNCZM%H^>vAo!a33Du5L(Rr16K$gNr zxR%?hZlHDs=Tl-8l~@~Ig19&s_#nPHqEEZPXeJ~8C?iwt?;^*l?${iI0-`bZI9Kp`~{@oboZ^v2!{;(H}&#H1Zs)l9mP zjR?H%%}sB|jqZTAqpnemJnIw;QF#--+uOb-c7l<~%!&UrV+b5fiT8xw%H1X5q^Du_d9zsX+8+t=N9~ zX_LrN60sVgFQ9+CLkiI~q-v1*3(zd7#E21X@~RoS zrlMSv+uUM)yc%-*Wh}AF_I^&pPKAuj@9VnzZ|OzNIfKF&qw-GKhpJF%a3|D`B-Xt^ zQ>&CHT~ph+()WWDZ$u2r7pjZdq7ULyfJ){4eR$1=e$VThV}+TkpcYw+>tL=xW51ZY zax`N%xdZOle@;|(=ITL=omW7P{X6`69IxM1fGVf!?!TZH^BB7tarF*&U8^IQ&9$g; zox7-27{RW_tIMzn^BWVGy7`Yt%pfVH3hWm_%Ao?bI=}Fz`1h&W?Ber@&TPTu6kKZW zYj>1H)Vl59i`W70d4D&Z(%5!zBQDEYC*Z$e0v@DB+8?U&{wdSlpfdYd0uD;~ByTGT zN*N?m^O;_14Ij*g3ZW+cmr3|vHZD)3x`DzeQ)l7x_Ak>!i%%OV8rI)L8wX#^w>K;w z_gg4yjKt?ILC1I0bCH|DMTDN;`m ziZ|l^&7??M0H%XKU^=&$&$WA)PTtPOl6Kn87V|b@(IjlRMHMsoa?UB*#D8?TJ(Wsl za+Z~Zy)vCHL|!+*-Ma39OzkS?J83FAN|zN&>xj13}?S!m;qc>v`7J7y^30a1mT z2f-~MW=1hwki%h!s`=;daEQEvm0(ZgzQQ-=+6rI!x_!@AZhG;3MxpDWXA9Vj$s#kD zQ*||thVAB~VIf@W@@hesjX8=xRe5AiHFEnO6Vn3Udoo5u>#>g3$;)rUt->PIXZYPW6|mMs zENp896yHLyE&+K)30f1^5nBcv3n2>%RVida?vg?l7`I=@szymy9FvgFJ=!)R81H6c z*6WB_Zzg8Fj^O^|;I1~3=K=nYhZ=PT0a8Y{4*{}`sXn$__*`3k<L34og&eD0;lwW#>r`R|vNLjk~O7Cq@taXwT z7NmE!(6mEE%HCKUBS<D{$R;5byNG!y!qN)!AaSc%jFBWFwS+SdglRcW%)}<#p$?(UBZZ?HUTNmxTbPr~qh;pdo4Hn*hl1vl z%)`$-QaCBEIrB&%Bw2IjkwR>;Q09?BY_j9TJbXalXCA)flg#6&n1?U!+q{?uj8m^+ z=8|=o2;t6k#-~@VAR+Ub|YbDjM#+4A+3816W(I9XM%q{ z^4h$hVPc<0)R=E;2u!?8iv*Ri6P8shC+&O*aV!#9D-lN&E+`vyKzDirC*-P9_t{7(&fuon#EYedY?p1t6k?tN3~PktSC+$Fb~^ zlS^6|)Rj%f%GpvmXCj?kxgTFdL|u52#}0ZbgW`5myadNXx}j~ z{YI4fugc@R9N+xL9EZ-Fa_n5Zn29G+s4C~=<9R!k$EJ8EUaJFj@mx@S-g!74{YWM< z*_53~BScdwUM@Qsxc_AxD_+VuiA*Mebw>L<7FrXUQ~HN~e4sfrb8JKnm}z?)|IU`z z5MZvA!@voa=h>ETno6m395DnF7J|v;%H?=GQ?z15gr7>JovcIPGH3`;R!Anh$CqGG 
z&gb&!EGo$tljUSFoh#y?u&k4JENntX^f}b2K>0M(!l2x{%J?Myd6&?MBUDzroWO>v z5+WbQa%sd-L(D8YX2l%fTMZPuRi`|loB#Y_qNwaQs17t_iw z4=iulMeOfNm2(KjP|BAPxh+wOm$Gr3h?W6HTD9d@ zEN4=gq;01XNyKh+L5R?i=As^yk4HbUF+`WhI$1=D$drnJ<20OJi-^>d$rM4xvbBED znrNVm(0l#DQMA8jz^U2;f(-W<=AdD~(^>o!AZR%-5CYH8^1j7xYQV=Hc%z%~i<~!2t z<}HvkE0(7HD@8DXSZxyFn!&$*`s?Xywp=dabi8aVmd>ZkiBvkDEv12I0Fh!5!QpCk zxP29Mtnx1eQv$IL8Ey=J7Rwu62hJduA*{5ocfadZ(7Ecr>Fl1N&F+Pm)RhZh9=Ru&8nm4%bR&rA^UXH9iZM5CMgmT9TGdoE!E+((aE(qFG2P3RC!aM zQFf(SZ9a$v>LpTEHVgB3GG=8n2x3I^Js!^{a!JPqVH?E>;DnE0FYa%4u8r?$`so+t zzYwtG6`{PuodS-*oKcK!5A=c^pEbnwCq@@1sHg_jP=UJu@R%3 z$U5bCE}p!E`Wyb;%Z8mZ*>;EXOuTtoFQa(iG)&DGC z3&>OtxC(9;b>9e1R(nP|9%|29A6(4O<{@~+8-}sw#W;S3wvoLl#URK4K|RDr5Um7IJMKpMdymGsrK} z*c+kQCzw0`IREqZ=6K@LN&3P|?pf_)rY|4TIINFIu#i_5nT5#9M!*dJ-YTyxl$6wE z-tGCoJ5_=bB2&cnIIEPjV@@J#JLRmEPG!qk+j2l$c7y-!ZhIYicAe~qnSjXqP9jV4e(OrU@0>gGc4KRgb|0dES+JrRQJyG7eIQ7gC?vIniyO05^eTW16xXIa8KB|!0E)tPhTjjuDo zWo6%vqD|^*^OMhR5i_+`b?)>Aox{O*vO6u+OC8V@u}#mQH}^vt-`_zHbBpSZeievY zd$YTC%XbfYpG{x-$M|^?v9;gM8AZDP8A&p|N0mR1vx5wZtD&Bu&DDSAU- z+xlmeQ0*gK|CzIR>a4xt_VnjTV{VBeXvl{8@m%{Yp2a=#e_p;)^|{ZJGQn+s4zvO< z-DgVIo@eQ~_M`F04-4_qv$~d*aZ>n}E8_#pNyqH66+;X|(hTQH4xL?L9sun;5TVs2 zI`iZH-_1Y3;5-iAB+_+*7@c8om_9r-Jgie9#5~#{v^c8cCi&x7b?$BQ#z#bI2laRl ztCZb5s&-o=BsfBmbO$XGlJS-a2_Mbq0kx(_?3k0-elWR^Smb?bz8YMkw7FRQE*8Rz z{~aYadPiJW{oH)oOZOcz^~#LyJHkW!qQ>N*0lhCF?=ZHyeD&`HDeEyW~3c;^3OA>rsveS<%w|ioYUiDs>h<$ zIo7t1b?i-#`f$fiddQwd_~V(oV;TFgw%zn(1XxpqwGlE6{}|jo{cm~_gIkAM`}*~7 z8eZHr-w{sGFN%Nsx#j6g(~#zFJ!&mzJ?ht_Y3O3No^I+4euSs>b8}4nc?F`UvWn+H z5E$`WEw3!(A7f}%$q8S9P7}W9y2sYymOarBhvt^RsU)g%qRk{B9mDwMy;J)ed3~DI9WS@!@6e9$&5l zA%^Xc8r(3yZbt5dd-yd)P`)a0RY+-f>!i%aWhvSoA1yD&EzXt?sh}??FV}Jg1y@FJ zbBSZGkNXgk8Qqmn=EvI+pIrOwR0z0G%cjmRMTn7Q*z&-T-io#(~{}g#Avm(Ma=53(Ry=n$r=P)8CtFe0mviTLnp-I zHtH(Pq?XHVPpxk=BV9vvR4h7>!)lZnf@l#+M6S1JBONMV;!)^d7l0Bdu`s+T9O3Bzf(hFVUp;X*an zw~pZ$@*>Ve9m4Ub94#-TFRAbnT;o|N%>n-}tyYFY>TnNPp*&nF7g+1?kd1>10{MJk z`1f%t!1cMD*NvMWQjg1d-HhH*Tve@DAw}I$P#!Mjp1^v30$ 
zCK93dLptE$`yxXB;bm^w4Hf3A&C1Gb+%5&FWOC}t3}MqxSmw*gHJl4X?psMdX6N;@ zcUegDQGY%zO?*bHHKZ(2LM<1U%y{2X2O2VtSXh3p&4r##)=drrRY+G{=y5Z0`9NqM zqPP|z^#npQ7vTDOixXwI>|JeJBjheM9#YF$zHD4J5MM{DVe)f-5F#Qw*A){D^sS_x zAU@T3+@+A-tPIR7#-keHAv!mQ)ZoIm%*JKix#e{!-d1(0m4*b%o!hzsTwk9kJl=oJ z*wCh{>2T+e-t^RE6y?=eHsG9;mSDxr$mIjqr)-7sk-+sqcF`9F)htL55`>MVE+dyG z-p!cRND!eftXd{92G6Ue$mf2VxZM=E%5@P90%N{>vtyIw-hhQmPOs5y`|JVj?oX%v^J^;NA=YhlncS z@*&gWuYtacO?^-Y3i7& zkP`YaRv2yVoT-rf-jk?2>bql6?Kcg!?3xwGI7Q|n&;iX);b=A}=;fP6sP(i@s(rM- zsuo6&K5U7<5bp@m4+h6Xg{n>kZEIFRP#ns`%f!~sertF`t?{TZ7f=`7Upsb+z&|^r zztCnC}`=t09EF2g@I zb>Oa+L8__sO%}Dp9%KdV^61=v_lQCaD`iC4GQYh4jWMIM(``aDH2=R#qunNIcG&}_ z9$&B4ZKx?mlXw(U1^YX%PRPu_xn3pT>R31Ri(%HrX-q~a-KCWuad4)bH9SFIq&qlC zgk)6E8N8q=IAr5+E&3a{v|>REh1G>ZL7O!wUbSxf){`_i{39zM{oHi9oFQeYhf%HC zcUzkrKuAp6?Wqj2lq4&wFX69;J}*P+p`?I%LFSNn@UR$JU6(j5pi8ohze_|+L)MFu0=gthP(41FpMynru4BENn$4Aq{xgWd8|C9s zU*NIAKXpCoihKoKa|1s)+5%TQ9X-K+y4Ge4PH`r1x&9A7%{4o$$P%^=$bfy+c zT$_?DNnArI4bxUX>u5$&=OTs=zTm?`T@Ru`2ROxZ91RGw>Cdi6&0bFNWk0|#b#lNo z2!20pR8K8PFSa!B8$GL}9Q*6CPPX%O*V7X)PEPXmVn4ss$EZlap`h%u3;fl)cs|ET zzUtR^n}-(EP3$4gsyC{Te6mB;KeUMsKplm3fW#mG+YTfF<^79vc-f#fSQ5GVCX@(r zUbI-L5aC*D81Gc@x?J7_WtHTod5uJl1lQ(jBp(*-8Rjh3Z`M>|%5drQc2Fj=m>id% za(+-(RjGPbSM^?9PX#7Uw!IQ!PAl`pNJ$wrfhS<@0RG{k&n%KAO(S&T;kAq$I~wMg zWVG``k-%Lob~`O_z1)||kw6tLB}<8FO1Y)TsRyiRFnaw5T>u4W4hC!p*K$aR3@uOn zJd=s*_3p_BB+NogULjlGxnRe0fIsC>F5k}o!eVm!eBh_)bV*9yU~~*Nl&*nIZKMsM zB}`C$2Wq1kL#*_h`n9PZsMQt3T$8 z;t$I~mmz)3=kv1}lu5jr?@}|e-GBwqkge&gH^o7Wt;Gc-w_)P{bwG50UkjqG4qR0N zS3ST8aZBWuNQ1@l2AHAuEg)G*t@7FOrfE3|#vjnYxM`Ul?%o=P@}`*~UNCuA;Pd-q z-LnAmSFsjk-gCil_#rPGHlLTKfCwS5H9|tbqHiP#gBAD_GBsWwuvw*H83`owXoI@6 zx?%6RGluDYBARDRs1OuHZoo*GF50R<&Lt2HSy_14llb9brA;0uMkifm_P9eB5sU;FA(3< z(roJ=PQrG==6kV-*#B1#_Pn}rvBBtg zIenj%+DIEIA;f2;SPbZWc)~Hw!Rms>U%*mbQ1IysV1?8$la&S9vbutvvZ&QE$W;w1 zO`VKtHLn>OuZqvHl@d~nc6mcdvbOpXRFB8{K;R)NsxuFW=@FlH0cEHv|5WZrC-%1w z%AP#7>5dDIhXkWWrmkwl$9h#?o6YKpHp?K>`&!JWvi0KY@xZ*k9!bC6NR5S>*V0#5 
zI2bYZ)RBi~NHZ5P{7d~iSFuaId6jIn}pY9d0=>@}1Yq>4Ly?p+;sqb!rGftXq}Lz$+8}`J;Uwdv(i%jmF1|P8qaC*T&j#twSXVit3`thBXE+ zRMc`B!FgQfLkt!{uKAP7)4bQ2W2KTWg@jn=%;BcXmDNqGZdwl@!IchGg?_Eny*;-~ zy}G5t(6RB7`ej3RoGq`YR>I@f_V3Q6P7o?SF`dXE9XB2Rks}?}Xa|#)kpND5x;ZyD zuS9z0iKDG$TU3QS@|wI5v`$0~1OIz}!nrQI=hD3{tWVU~hw2>c8d`>A9As}d@Z?2c zx7#5I?4}1Gu$!hMu=~;=8wBJr)q$sSe|Z%U_|xk7cwLu4UNwxsl`w?6-MlAG%Bwm; zyThWf1|hVY0qK)$U1;EJ=lggk9t|UfB(@rSUE-nJ3d{#;Z3TSQOS_`VjP3+|N!oTR z!1HbYB0-4LxI0v8&KPYSihh)Ah=Ybv$KCZT0o+|r7Ely$5QzjN#hd>3AIz=*ASt&C ze;1GXnat6X^D;*_okR&p;d3Toi6Rke{9d0G45tY=0mT9)jh3CNRkHr|oNMQ{%Ddis7XUCP5?&92?jq=EQMzq zXYoi5T&qFbJ@RiUW*@fi?n^;Cv|rSo&aW{ZpLboIjH73fcpF{=)h|D* zYcu5&mF>*#ap3POBq|5bCP)zpp+_q#HySWwwU4BFz@&+QqYzUqJQrA|?A0v^t<$Rn`~OY$tzGP=0NJLJNUBz(|9W3`n$Da^SG2&FBVbYF=Q0rl zUuLRKA~{n9UvdM|&}>|iJ~ZwtHDB+{YkPk6-&evq_igc01dFO7KIO{k-k}I_ZBXUz zk~jYO5Yj@!dZ;0R`gMiz1%VIsQCas>|1f(os1L#0=3V;)ck}% zUv`>KpJ3(7(O3xdWv1$Mal)58A0g0JG*}3%%hX5+tQY^po0<@)ke#nSA<)}Ym=NgA zC25aG2=e9-)JKv&Z;~NDK?h~8)+S!O^LPq@%1f$V2=w*C2n&&*gzM0W4H5#qmDBhG zg+OmMEidky5)<;V`Si!f5se{t_x zE#G><@T+^RnvFDA7W9p}g%u0Os~$~)XG;v0DyE%bs={b+np~fNGPEc{H!U-Yl~Bg9 zPnG}WO<0Lnu@igXn0We0k*^_(7+(u>-p#roSUNy#h1+>eW+NFx7`}q2pgxgQwq9L8 zA!Vo8V+znlf;gM=pPZI;gk$OkTZVs1z2M zh0_8cN|vFj^qmFXj@IqAP|32w;F46QAYPEC-4gJeChuwG`=7V}$N{V#b5s;152$Nx z8IJ8vnIN0hqCU8rB$dr9^k3R-r_M3$@RSX1#FVV8Y^? 
z19p@SgZk!f)Il@k6S_R4AWZ1;WRke(5lB24gtQ*Cs9r|?m;a}K^{bnaHy&f4(B)ge zW)4-sE7YsA?-;Qa!M`c`3;)y+K2S#xD-v4tpi)E|1-PlKKpf4v?Hlc*|BN+wMJ#sF z1o+~Dm49|SMHpVmEH2)_RF`6PKLDS|s z-v!A@DJF!{DDTTo)9F*h_;NH>#P~8(b-E(PmpmUujIU_0BBm}=kQ7Mzf_mAn_C_gU z6fKZfpCZQFlpySbs)%ci^X8It#G{Du=I|(Dyh(;s#6)EytKR>4qAbVF8fVIS>lHD+ zel%3X)TXii4h^g&^L&%>Gqv$sxCph>8RaL*oPt0Xu2LVHktT&?6!eg6Jyauano) zjh(!tJ7y;0K>R`cEEGY=t~Y|hEF2gR6t#^j!G#Om1?kN!W+M?~7PDyQeD6I~b?Vl= zuV3q1cpmhRp(Quey1*r0m*B7=F*f2Ghn^1B?}CsU7DYbeLXl zdB+1!AWDzE<-a?A^&NQbqhkxbax@k6Q{3)YBu~znC>h%uL5M{Ig)n+#4H;{SlYOM8 zm@5O)CV)0<(O5OrXdb9~?iaQF*BI5rsPp||)Ce4#Gm23oi1rvY0!DH+MvaR5#HbOf zAEPEfVQ_FFEYAc75tI`gL=Zt>8Gb?oOP%>T{?>{AjWe*+-YX3(BR*OJ%QTp@zkpSy zfn`V>>+c4ZAs9gY41s0%&BKwXS^~=u5t!YcIomVFU_VB&#U+r;b3lY1&8N}Dmn#iX zyU|&|3M_Vx1Del<5x|jGZ~o-h2*6p-P#XaOlYQ(&P-Hyk!fD3bTVuMEVjvu^yAT2HYa-=m#G)&QgoU} z8>FZvaDfYX3~3W=rEons9DPFvdfeSl8G?x*+9~0;bPYX54@L9o`km({TQsGbl8lo5 zlu@la2rEA&@}eCcXLHf9({CLgzo8LEg?F3bGZHmXfpM^figLCdHX*^9v8b_qe(6XB(!}jZ-l{*+(#_lGJ$+GSy1asX@2*MG&x|^^pp3(bk&wF=I zwu*%ACag^znNqqNj008m5%N1 zfC=;|r@H%;=EI>k`OoLqzM5=R*-;yj6S)wEsr>}iLCHo6Q^WKss(_LnLnn4Cz3i$k z7f)eRj?`uBBKWjp{bMETM5?a0{o}uPR8vz|^#9{};~D(f@vlHWcCES+@zGcBz5=dwg12k7)Q=>K8!=4qW$W5Oys;y~MtcA# zqSTw@!z>cm#@QI6GhdrFHd4d0;(!Jd@pHF>TGSMHmNY$oZ$L!Cc?w9M*90x)*qh$_ z(#2Wurn_RryWmaRs!I}&_Ou68A4%jJv?I$GUw8t`2nTJv7N%$Ej$lw;NKp0R5L`?= zxA^SIX9nj_fQxD9fpR=mRga1ru5_U=3on||kv6iyN5@+TvY$|o{r-zzyfxV&Sm(Nv z4jl`OM54-2VbAFq}*ya4;jR)>FO zFPrQk*J7dQV^X6xwL`}y<#f`6#F%CG%jdjv(_B?@Y#CAbGm08*}SxURp| zFMW1Ntz}xjtFDo;J{%frR*j3cx;a%91gcZ>p1y8THxUJJykp!~N!~Q>_5)kABA?Sc zUY9SqXw=Vi>ksVwaSw{r3o9@Js%95nRvC-P!(`gn#LK7Fl1>Q}JxQkR!ir)2(U_zQ z$HPH8=g!@sID%viLP~|ai-*l(6TS^sPc<~}cP`y-0EBD~8!7WnRJSgbTrZ?d|MUBY z&q3?%*c(iXi1aQ0&A!y#X91*OR^E8MP+u!A=$Ke}C-IYsW6W)$c)xwS*ms+J)VMQh z!}eXaIyU*dlCasA>F_lt+G9bLJ#$-7De9)#1WuF|cb~S9*FgHVL}-g+Z%hU|2dy!ZoX~r;+T6HydD%%d_X#;8*s|`wcK&stQ)fJ0MAU>RkQsdl)Lzv+gqe zR&O`g+k_ebCsb$(@r#-nvH3M#0ek4c`~RNoF}3aEntEXENHuV##<JVZ}clj?GjVg?rl$EsuyHXxp9uK51= 
zuFc0mRueqOovj;*2WP;Ic$#{4kVxzww`VZ!sY8gGmShq}C+dU@aRbMG6Y{-?3ua*^ zezO5^{AN>V{buuPC98w8V(hC&Z^64a$2Om>$M$z=RmwWircya4+CV@Ri%v8a?*@5Y zPh2<@Ys6m53sd!I@979$l9DwrDEV~G0nQr1gHt~`%MiYKGV}7~%M0~z24xvKeDpdi za11mI!$F~7(jjs9KCt8PeLzt($+d@ABt%jS zq+^pI49_nQ(6s|0-KJe>h8ZdoN)8T^;=b4yUNwCkC|%zRdO^=OU;J=o9HG7LbJe$y z-|d1bFmeVa39E_`%*Z2$Q#HTw-k!%pbU>3?e+$d`L6+a|#g) zOZLn8Lt2AZAV^t5G812T!D3Y%ZNgt~NfNmFn&3R!zuu9;#$NoAYxA2_H{b9kz5;p%O#eL2}fi4od^0@p>5 zn+aUI2%u=N5DQR4cW7sniq&`-i*l!=&a6^zFQuXaE*=uZXwKYFs^eRZa8%`#W_s^Q z%>&Z1cCh&D(#kN?Us9RVUkVPA!j&Y$7M09}Olj}SOgZ01ItPrY4c69%Yq=F!E)=Fb zETo3x8p9RiL~Lo=nlxXHejh2>869n>9mXh>bOit zB2XesCGf})TA|T!fR30^I;`R@4c@!YiFV+G!ei@$wW}X4d2<4CjU?SgDUhV02h=5k zjIl6-{+kFa#63MY-33>`*fOYFfXjL2+WF>fUIvb^X>2?v35N8#!3^uXUq6B+D$;uP zYRwD{X*~-`6pnB$^oGy&s#syc(K0KA`tD70hf#)=>`g_yW+n9uv(jfs@%*2Q!L6)9 zKeGaxD`BkPBIJV}EK%W57bO_oLy+N0+BAhHYPG%&@bbsOxeSq7Uj>YCgBA*NwPwf! zEfiwJbqh-~^i17C_>{l!99)K8z+Z?F^B0z8=o$V(_>{l!{7Z&jz+Z?F^B0z8=o$V( Vc%tSkUvQZ4(@%dH&fm3q>i@t^2u%P0 literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.a b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.a new file mode 100644 index 000000000..4caea32ed --- /dev/null +++ b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.a @@ -0,0 +1,434 @@ +! +__.PKGDEF 0 0 0 644 49694 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package context + import sync "sync" + import runtime "runtime" + import time "time" + import http "net/http" + import url "net/url" // indirect + type @"net/url".Userinfo struct { @"net/url".username string; @"net/url".password string; @"net/url".passwordSet bool } + func (@"net/url".u·3 *@"net/url".Userinfo "esc:0x1") Password () (? string, ? 
bool) { if @"net/url".u·3.@"net/url".passwordSet { return @"net/url".u·3.@"net/url".password, true }; return "", false } + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") String () (? string) + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") Username () (? string) { return @"net/url".u·2.@"net/url".username } + type @"net/url".Values map[string][]string + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Add (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = append(@"net/url".v·1[@"net/url".key·2], @"net/url".value·3) } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Del (@"net/url".key·2 string "esc:0x0") { delete(@"net/url".v·1, @"net/url".key·2) } + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Encode () (? string) + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Get (@"net/url".key·3 string "esc:0x0") (? string) { if @"net/url".v·2 == nil { return "" }; var @"net/url".vs·4 []string; ; var @"net/url".ok·5 bool; ; @"net/url".vs·4, @"net/url".ok·5 = @"net/url".v·2[@"net/url".key·3]; if !@"net/url".ok·5 || len(@"net/url".vs·4) == 0x0 { return "" }; return @"net/url".vs·4[0x0] } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Set (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = ([]string{ 0x0:@"net/url".value·3 }) } + type @"net/url".URL struct { Scheme string; Opaque string; User *@"net/url".Userinfo; Host string; Path string; RawQuery string; Fragment string } + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") IsAbs () (? bool) { return @"net/url".u·2.Scheme != "" } + func (@"net/url".u·3 *@"net/url".URL "esc:0x2") Parse (@"net/url".ref·4 string) (? *@"net/url".URL, ? error) + func (@"net/url".u·2 *@"net/url".URL) Query () (? @"net/url".Values) + func (@"net/url".u·2 *@"net/url".URL "esc:0x1") RequestURI () (? string) + func (@"net/url".u·2 *@"net/url".URL "esc:0x2") ResolveReference (@"net/url".ref·3 *@"net/url".URL "esc:0x2") (? 
*@"net/url".URL) + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") String () (? string) + import io "io" // indirect + type @"io".Writer interface { Write(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"net/http".keyValues struct { @"net/http".key string; @"net/http".values []string } + type @"net/http".headerSorter struct { @"net/http".kvs []@"net/http".keyValues } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Len () (? int) { return len(@"net/http".s·2.@"net/http".kvs) } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Less (@"net/http".i·3 int, @"net/http".j·4 int) (? bool) { return @"net/http".s·2.@"net/http".kvs[@"net/http".i·3].@"net/http".key < @"net/http".s·2.@"net/http".kvs[@"net/http".j·4].@"net/http".key } + func (@"net/http".s·1 *@"net/http".headerSorter "esc:0x0") Swap (@"net/http".i·2 int, @"net/http".j·3 int) { @"net/http".s·1.@"net/http".kvs[@"net/http".i·2], @"net/http".s·1.@"net/http".kvs[@"net/http".j·3] = @"net/http".s·1.@"net/http".kvs[@"net/http".j·3], @"net/http".s·1.@"net/http".kvs[@"net/http".i·2] } + type @"net/http".Header map[string][]string + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Add (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Del (@"net/http".key·2 string "esc:0x0") + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Get (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Set (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") WriteSubset (@"net/http".w·3 @"io".Writer, @"net/http".exclude·4 map[string]bool "esc:0x0") (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".clone () (? 
@"net/http".Header) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".get (@"net/http".key·3 string "esc:0x0") (? string) { var @"net/http".v·4 []string; ; @"net/http".v·4 = @"net/http".h·2[@"net/http".key·3]; if len(@"net/http".v·4) > 0x0 { return @"net/http".v·4[0x0] }; return "" } + func (@"net/http".h·3 @"net/http".Header "esc:0x0") @"net/http".sortedKeyValues (@"net/http".exclude·4 map[string]bool "esc:0x0") (@"net/http".kvs·1 []@"net/http".keyValues, @"net/http".hs·2 *@"net/http".headerSorter) + type @"io".ReadCloser interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error) } + import multipart "mime/multipart" // indirect + import textproto "net/textproto" // indirect + type @"net/textproto".MIMEHeader map[string][]string + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Add (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Del (@"net/textproto".key·2 string "esc:0x0") + func (@"net/textproto".h·2 @"net/textproto".MIMEHeader "esc:0x0") Get (@"net/textproto".key·3 string "esc:0x0") (? string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Set (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + type @"mime/multipart".File interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error); ReadAt(@"io".p []byte, @"io".off int64) (@"io".n int, @"io".err error); Seek(@"io".offset int64, @"io".whence int) (? int64, ? error) } + type @"mime/multipart".FileHeader struct { Filename string; Header @"net/textproto".MIMEHeader; @"mime/multipart".content []byte; @"mime/multipart".tmpfile string } + func (@"mime/multipart".fh·3 *@"mime/multipart".FileHeader) Open () (? @"mime/multipart".File, ? 
error) + type @"mime/multipart".Form struct { Value map[string][]string; File map[string][]*@"mime/multipart".FileHeader } + func (@"mime/multipart".f·2 *@"mime/multipart".Form "esc:0x0") RemoveAll () (? error) + import tls "crypto/tls" // indirect + import x509 "crypto/x509" // indirect + type @"crypto/x509".SignatureAlgorithm int + type @"crypto/x509".PublicKeyAlgorithm int + import big "math/big" // indirect + type @"math/big".Word uintptr + type @"math/big".divisor struct { @"math/big".bbb @"math/big".nat; @"math/big".nbits int; @"math/big".ndigits int } + import rand "math/rand" // indirect + type @"math/rand".Source interface { Int63() (? int64); Seed(@"math/rand".seed int64) } + type @"math/rand".Rand struct { @"math/rand".src @"math/rand".Source } + func (@"math/rand".r·2 *@"math/rand".Rand) ExpFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Float32 () (? float32) + func (@"math/rand".r·2 *@"math/rand".Rand) Float64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int () (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31 () (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31n (@"math/rand".n·3 int32) (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63 () (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63n (@"math/rand".n·3 int64) (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Intn (@"math/rand".n·3 int) (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) NormFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Perm (@"math/rand".n·3 int) (? []int) + func (@"math/rand".r·1 *@"math/rand".Rand) Seed (@"math/rand".seed·2 int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Uint32 () (? uint32) + type @"io".RuneScanner interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error); UnreadRune() (? 
error) } + type @"math/big".nat []@"math/big".Word + func (@"math/big".z·2 @"math/big".nat) @"math/big".add (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".and (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".andNot (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bit (@"math/big".i·3 uint) (? uint) { var @"math/big".j·4 int; ; @"math/big".j·4 = int(@"math/big".i·3 / 0x40); if @"math/big".j·4 >= len(@"math/big".z·2) { return 0x0 }; return uint(@"math/big".z·2[@"math/big".j·4] >> (@"math/big".i·3 % 0x40) & @"math/big".Word(0x1)) } + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".bitLen () (? int) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bytes (@"math/big".buf·3 []byte "esc:0x0") (@"math/big".i·1 int) + func (@"math/big".z·1 @"math/big".nat "esc:0x0") @"math/big".clear () + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".cmp (@"math/big".y·3 @"math/big".nat "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".q·1 @"math/big".nat) @"math/big".convertWords (@"math/big".s·2 []byte "esc:0x0", @"math/big".charset·3 string "esc:0x0", @"math/big".b·4 @"math/big".Word, @"math/big".ndigits·5 int, @"math/big".bb·6 @"math/big".Word, @"math/big".table·7 []@"math/big".divisor "esc:0x0") + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".decimalString () (? 
string) + func (@"math/big".z·3 @"math/big".nat) @"math/big".div (@"math/big".z2·4 @"math/big".nat, @"math/big".u·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat "esc:0x2") @"math/big".divLarge (@"math/big".u·4 @"math/big".nat, @"math/big".uIn·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat) @"math/big".divW (@"math/big".x·4 @"math/big".nat, @"math/big".y·5 @"math/big".Word) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNN (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNNWindowed (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expWW (@"math/big".x·3 @"math/big".Word, @"math/big".y·4 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".make (@"math/big".n·3 int) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat) @"math/big".modW (@"math/big".d·3 @"math/big".Word) (@"math/big".r·1 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mul (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulAddWW (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".Word, @"math/big".r·5 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulRange (@"math/big".a·3 uint64, @"math/big".b·4 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".norm () (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".or (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".n·2 @"math/big".nat) @"math/big".probablyPrime (@"math/big".reps·3 int) (? bool) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".random (@"math/big".rand·3 *@"math/rand".Rand, @"math/big".limit·4 @"math/big".nat "esc:0x0", @"math/big".n·5 int) (? @"math/big".nat) + func (@"math/big".z·4 @"math/big".nat) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? @"math/big".nat, ? int, ? error) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".set (@"math/big".x·3 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBit (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".i·4 uint, @"math/big".b·5 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBytes (@"math/big".buf·3 []byte "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setUint64 (@"math/big".x·3 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setWord (@"math/big".x·3 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shl (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shr (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".string (@"math/big".charset·3 string "esc:0x0") (? string) + func (@"math/big".z·2 @"math/big".nat) @"math/big".sub (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".trailingZeroBits () (? 
uint) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".xor (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + import fmt "fmt" // indirect + type @"fmt".State interface { Flag(@"fmt".c int) (? bool); Precision() (@"fmt".prec int, @"fmt".ok bool); Width() (@"fmt".wid int, @"fmt".ok bool); Write(@"fmt".b []byte) (@"fmt".ret int, @"fmt".err error) } + type @"fmt".ScanState interface { Read(@"fmt".buf []byte) (@"fmt".n int, @"fmt".err error); ReadRune() (@"fmt".r rune, @"fmt".size int, @"fmt".err error); SkipSpace(); Token(@"fmt".skipSpace bool, @"fmt".f func(? rune) (? bool)) (@"fmt".token []byte, @"fmt".err error); UnreadRune() (? error); Width() (@"fmt".wid int, @"fmt".ok bool) } + type @"math/big".Int struct { @"math/big".neg bool; @"math/big".abs @"math/big".nat } + func (@"math/big".z·2 *@"math/big".Int) Abs (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Add (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) And (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) AndNot (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Binomial (@"math/big".n·3 int64, @"math/big".k·4 int64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) Bit (@"math/big".i·3 int) (? uint) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") BitLen () (? int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x1") Bits () (? []@"math/big".Word) { return @"math/big".x·2.@"math/big".abs } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Bytes () (? 
[]byte) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Cmp (@"math/big".y·3 *@"math/big".Int "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".z·2 *@"math/big".Int) Div (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) DivMod (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".m·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Exp (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int "esc:0x0", @"math/big".m·5 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·1 *@"math/big".Int "esc:0x0") Format (@"math/big".s·2 @"fmt".State, @"math/big".ch·3 rune) + func (@"math/big".z·2 *@"math/big".Int) GCD (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int, @"math/big".a·5 *@"math/big".Int, @"math/big".b·6 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) GobDecode (@"math/big".buf·3 []byte "esc:0x0") (? error) + func (@"math/big".x·3 *@"math/big".Int "esc:0x0") GobEncode () (? []byte, ? error) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Int64 () (? int64) + func (@"math/big".z·2 *@"math/big".Int) Lsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalText () (@"math/big".text·1 []byte, @"math/big".err·2 error) + func (@"math/big".z·2 *@"math/big".Int) Mod (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) ModInverse (@"math/big".g·3 *@"math/big".Int, @"math/big".n·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Mul (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? 
*@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) MulRange (@"math/big".a·3 int64, @"math/big".b·4 int64) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Neg (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Not (@"math/big".x·3 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Or (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) ProbablyPrime (@"math/big".n·3 int) (? bool) + func (@"math/big".z·2 *@"math/big".Int) Quo (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) QuoRem (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".r·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rand (@"math/big".rnd·3 *@"math/rand".Rand, @"math/big".n·4 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rem (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Scan (@"math/big".s·3 @"fmt".ScanState, @"math/big".ch·4 rune) (? error) + func (@"math/big".z·2 *@"math/big".Int) Set (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBit (@"math/big".x·3 *@"math/big".Int, @"math/big".i·4 int, @"math/big".b·5 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int "esc:0x2") SetBits (@"math/big".abs·3 []@"math/big".Word) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBytes (@"math/big".buf·3 []byte "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetInt64 (@"math/big".x·3 int64) (? 
*@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) SetString (@"math/big".s·4 string, @"math/big".base·5 int) (? *@"math/big".Int, ? bool) + func (@"math/big".z·2 *@"math/big".Int) SetUint64 (@"math/big".x·3 uint64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Sign () (? int) { if len(@"math/big".x·2.@"math/big".abs) == 0x0 { return 0x0 }; if @"math/big".x·2.@"math/big".neg { return -0x1 }; return 0x1 } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") String () (? string) + func (@"math/big".z·2 *@"math/big".Int) Sub (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Uint64 () (? uint64) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalJSON (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalText (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) Xor (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) @"math/big".binaryGCD (@"math/big".a·3 *@"math/big".Int, @"math/big".b·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·4 *@"math/big".Int) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? *@"math/big".Int, ? int, ? error) + import pkix "crypto/x509/pkix" // indirect + import asn1 "encoding/asn1" // indirect + type @"encoding/asn1".ObjectIdentifier []int + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") Equal (@"encoding/asn1".other·3 @"encoding/asn1".ObjectIdentifier "esc:0x0") (? bool) + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") String () (? 
string) + type @"crypto/x509/pkix".AttributeTypeAndValue struct { Type @"encoding/asn1".ObjectIdentifier; Value interface {} } + type @"crypto/x509/pkix".RelativeDistinguishedNameSET []@"crypto/x509/pkix".AttributeTypeAndValue + type @"crypto/x509/pkix".RDNSequence []@"crypto/x509/pkix".RelativeDistinguishedNameSET + type @"crypto/x509/pkix".Name struct { Country []string; Organization []string; OrganizationalUnit []string; Locality []string; Province []string; StreetAddress []string; PostalCode []string; SerialNumber string; CommonName string; Names []@"crypto/x509/pkix".AttributeTypeAndValue } + func (@"crypto/x509/pkix".n·1 *@"crypto/x509/pkix".Name) FillFromRDNSequence (@"crypto/x509/pkix".rdns·2 *@"crypto/x509/pkix".RDNSequence "esc:0x0") + func (@"crypto/x509/pkix".n·2 @"crypto/x509/pkix".Name) ToRDNSequence () (@"crypto/x509/pkix".ret·1 @"crypto/x509/pkix".RDNSequence) + type @"time".zone struct { @"time".name string; @"time".offset int; @"time".isDST bool } + type @"time".zoneTrans struct { @"time".when int64; @"time".index uint8; @"time".isstd bool; @"time".isutc bool } + type @"time".Location struct { @"time".name string; @"time".zone []@"time".zone; @"time".tx []@"time".zoneTrans; @"time".cacheStart int64; @"time".cacheEnd int64; @"time".cacheZone *@"time".zone } + func (@"time".l·2 *@"time".Location "esc:0x0") String () (? string) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".firstZoneUsed () (? bool) + func (@"time".l·2 *@"time".Location "esc:0x2") @"time".get () (? *@"time".Location) + func (@"time".l·6 *@"time".Location "esc:0x1") @"time".lookup (@"time".sec·7 int64) (@"time".name·1 string, @"time".offset·2 int, @"time".isDST·3 bool, @"time".start·4 int64, @"time".end·5 int64) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".lookupFirstZone () (? 
int) + func (@"time".l·4 *@"time".Location "esc:0x0") @"time".lookupName (@"time".name·5 string "esc:0x0", @"time".unix·6 int64) (@"time".offset·1 int, @"time".isDST·2 bool, @"time".ok·3 bool) + type @"time".Duration int64 + func (@"time".d·2 @"time".Duration) Hours () (? float64) { var @"time".hour·3 @"time".Duration; ; @"time".hour·3 = @"time".d·2 / @"time".Duration(0x34630B8A000); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x34630B8A000); return float64(@"time".hour·3) + float64(@"time".nsec·4) * 0x9C5FFF26ED75Fp-93 } + func (@"time".d·2 @"time".Duration) Minutes () (? float64) { var @"time".min·3 @"time".Duration; ; @"time".min·3 = @"time".d·2 / @"time".Duration(0xDF8475800); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0xDF8475800); return float64(@"time".min·3) + float64(@"time".nsec·4) * 0x9299FF347E9E9p-87 } + func (@"time".d·2 @"time".Duration) Nanoseconds () (? int64) { return int64(@"time".d·2) } + func (@"time".d·2 @"time".Duration) Seconds () (? float64) { var @"time".sec·3 @"time".Duration; ; @"time".sec·3 = @"time".d·2 / @"time".Duration(0x3B9ACA00); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x3B9ACA00); return float64(@"time".sec·3) + float64(@"time".nsec·4) * 0x112E0BE826D695p-82 } + func (@"time".d·2 @"time".Duration) String () (? string) + type @"time".Month int + func (@"time".m·2 @"time".Month) String () (? string) { return @"time".months[@"time".m·2 - @"time".Month(0x1)] } + type @"time".Weekday int + func (@"time".d·2 @"time".Weekday) String () (? string) { return @"time".days[@"time".d·2] } + type @"time".Time struct { @"time".sec int64; @"time".nsec int32; @"time".loc *@"time".Location } + func (@"time".t·2 @"time".Time "esc:0x2") Add (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") AddDate (@"time".years·3 int, @"time".months·4 int, @"time".days·5 int) (? 
@"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") After (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec > @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec > @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Before (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec < @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec < @"time".u·3.@"time".nsec } + func (@"time".t·4 @"time".Time "esc:0x0") Clock () (@"time".hour·1 int, @"time".min·2 int, @"time".sec·3 int) + func (@"time".t·4 @"time".Time "esc:0x0") Date () (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int) + func (@"time".t·2 @"time".Time "esc:0x0") Day () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Equal (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec == @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Format (@"time".layout·3 string "esc:0x0") (? string) + func (@"time".t·2 *@"time".Time "esc:0x0") GobDecode (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·3 @"time".Time "esc:0x0") GobEncode () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Hour () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") ISOWeek () (@"time".year·1 int, @"time".week·2 int) + func (@"time".t·2 @"time".Time "esc:0x2") In (@"time".loc·3 *@"time".Location "esc:0x2") (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") IsZero () (? bool) { return @"time".t·2.@"time".sec == 0x0 && @"time".t·2.@"time".nsec == 0x0 } + func (@"time".t·2 @"time".Time "esc:0x2") Local () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".Local; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x2") Location () (? 
*@"time".Location) { var @"time".l·3 *@"time".Location; ; @"time".l·3 = @"time".t·2.@"time".loc; if @"time".l·3 == nil { @"time".l·3 = @"time".UTC }; return @"time".l·3 } + func (@"time".t·3 @"time".Time "esc:0x0") MarshalBinary () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalText () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Minute () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Month () (? @"time".Month) + func (@"time".t·2 @"time".Time "esc:0x0") Nanosecond () (? int) { return int(@"time".t·2.@"time".nsec) } + func (@"time".t·2 @"time".Time "esc:0x2") Round (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") Second () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") String () (? string) + func (@"time".t·2 @"time".Time "esc:0x0") Sub (@"time".u·3 @"time".Time "esc:0x0") (? @"time".Duration) + func (@"time".t·2 @"time".Time "esc:0x2") Truncate (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") UTC () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".UTC; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x0") Unix () (? int64) { return @"time".t·2.@"time".sec + -0xE7791F700 } + func (@"time".t·2 @"time".Time "esc:0x0") UnixNano () (? int64) { return (@"time".t·2.@"time".sec + -0xE7791F700) * 0x3B9ACA00 + int64(@"time".t·2.@"time".nsec) } + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalBinary (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalJSON (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalText (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 @"time".Time "esc:0x0") Weekday () (? @"time".Weekday) + func (@"time".t·2 @"time".Time "esc:0x0") Year () (? 
int) + func (@"time".t·2 @"time".Time "esc:0x0") YearDay () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") Zone () (@"time".name·1 string, @"time".offset·2 int) + func (@"time".t·2 @"time".Time "esc:0x0") @"time".abs () (? uint64) + func (@"time".t·5 @"time".Time "esc:0x0") @"time".date (@"time".full·6 bool) (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int, @"time".yday·4 int) + func (@"time".t·4 @"time".Time "esc:0x1") @"time".locabs () (@"time".name·1 string, @"time".offset·2 int, @"time".abs·3 uint64) + type @"crypto/x509".KeyUsage int + type @"crypto/x509/pkix".Extension struct { Id @"encoding/asn1".ObjectIdentifier; Critical bool "asn1:\"optional\""; Value []byte } + type @"crypto/x509".ExtKeyUsage int + import net "net" // indirect + type @"net".IPMask []byte + func (@"net".m·3 @"net".IPMask "esc:0x0") Size () (@"net".ones·1 int, @"net".bits·2 int) + func (@"net".m·2 @"net".IPMask "esc:0x0") String () (? string) + type @"net".IP []byte + func (@"net".ip·2 @"net".IP "esc:0x0") DefaultMask () (? @"net".IPMask) + func (@"net".ip·2 @"net".IP "esc:0x0") Equal (@"net".x·3 @"net".IP "esc:0x0") (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsGlobalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsInterfaceLocalMulticast () (? bool) { return len(@"net".ip·2) == 0x10 && @"net".ip·2[0x0] == byte(0xFF) && @"net".ip·2[0x1] & byte(0xF) == byte(0x1) } + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLoopback () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsUnspecified () (? bool) + func (@"net".ip·3 @"net".IP "esc:0x0") MarshalText () (? []byte, ? error) + func (@"net".ip·2 @"net".IP "esc:0x0") Mask (@"net".mask·3 @"net".IPMask "esc:0x0") (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x0") String () (? 
string) + func (@"net".ip·2 @"net".IP "esc:0x2") To16 () (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x2") To4 () (? @"net".IP) + func (@"net".ip·2 *@"net".IP "esc:0x0") UnmarshalText (@"net".text·3 []byte "esc:0x0") (? error) + type @"encoding/asn1".RawContent []byte + type @"encoding/asn1".RawValue struct { Class int; Tag int; IsCompound bool; Bytes []byte; FullBytes []byte } + type @"crypto/x509/pkix".AlgorithmIdentifier struct { Algorithm @"encoding/asn1".ObjectIdentifier; Parameters @"encoding/asn1".RawValue "asn1:\"optional\"" } + type @"crypto/x509/pkix".RevokedCertificate struct { SerialNumber *@"math/big".Int; RevocationTime @"time".Time; Extensions []@"crypto/x509/pkix".Extension "asn1:\"optional\"" } + type @"crypto/x509/pkix".TBSCertificateList struct { Raw @"encoding/asn1".RawContent; Version int "asn1:\"optional,default:2\""; Signature @"crypto/x509/pkix".AlgorithmIdentifier; Issuer @"crypto/x509/pkix".RDNSequence; ThisUpdate @"time".Time; NextUpdate @"time".Time "asn1:\"optional\""; RevokedCertificates []@"crypto/x509/pkix".RevokedCertificate "asn1:\"optional\""; Extensions []@"crypto/x509/pkix".Extension "asn1:\"tag:0,optional,explicit\"" } + type @"encoding/asn1".BitString struct { Bytes []byte; BitLength int } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x0") At (@"encoding/asn1".i·3 int) (? int) { if @"encoding/asn1".i·3 < 0x0 || @"encoding/asn1".i·3 >= @"encoding/asn1".b·2.BitLength { return 0x0 }; var @"encoding/asn1".x·4 int; ; @"encoding/asn1".x·4 = @"encoding/asn1".i·3 / 0x8; var @"encoding/asn1".y·5 uint; ; @"encoding/asn1".y·5 = 0x7 - uint(@"encoding/asn1".i·3 % 0x8); return int(@"encoding/asn1".b·2.Bytes[@"encoding/asn1".x·4] >> @"encoding/asn1".y·5) & 0x1 } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x2") RightAlign () (? 
[]byte) + type @"crypto/x509/pkix".CertificateList struct { TBSCertList @"crypto/x509/pkix".TBSCertificateList; SignatureAlgorithm @"crypto/x509/pkix".AlgorithmIdentifier; SignatureValue @"encoding/asn1".BitString } + func (@"crypto/x509/pkix".certList·2 *@"crypto/x509/pkix".CertificateList "esc:0x0") HasExpired (@"crypto/x509/pkix".now·3 @"time".Time "esc:0x0") (? bool) + type @"io".Reader interface { Read(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"crypto/x509".CertPool struct { @"crypto/x509".bySubjectKeyId map[string][]int; @"crypto/x509".byName map[string][]int; @"crypto/x509".certs []*@"crypto/x509".Certificate } + func (@"crypto/x509".s·1 *@"crypto/x509".CertPool) AddCert (@"crypto/x509".cert·2 *@"crypto/x509".Certificate) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool) AppendCertsFromPEM (@"crypto/x509".pemCerts·3 []byte) (@"crypto/x509".ok·1 bool) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool "esc:0x0") Subjects () (@"crypto/x509".res·1 [][]byte) + func (@"crypto/x509".s·4 *@"crypto/x509".CertPool "esc:0x0") @"crypto/x509".findVerifiedParents (@"crypto/x509".cert·5 *@"crypto/x509".Certificate) (@"crypto/x509".parents·1 []int, @"crypto/x509".errCert·2 *@"crypto/x509".Certificate, @"crypto/x509".err·3 error) + type @"crypto/x509".VerifyOptions struct { DNSName string; Intermediates *@"crypto/x509".CertPool; Roots *@"crypto/x509".CertPool; CurrentTime @"time".Time; KeyUsages []@"crypto/x509".ExtKeyUsage } + type @"crypto/x509".Certificate struct { Raw []byte; RawTBSCertificate []byte; RawSubjectPublicKeyInfo []byte; RawSubject []byte; RawIssuer []byte; Signature []byte; SignatureAlgorithm @"crypto/x509".SignatureAlgorithm; PublicKeyAlgorithm @"crypto/x509".PublicKeyAlgorithm; PublicKey interface {}; Version int; SerialNumber *@"math/big".Int; Issuer @"crypto/x509/pkix".Name; Subject @"crypto/x509/pkix".Name; NotBefore @"time".Time; NotAfter @"time".Time; KeyUsage @"crypto/x509".KeyUsage; Extensions []@"crypto/x509/pkix".Extension; 
ExtraExtensions []@"crypto/x509/pkix".Extension; ExtKeyUsage []@"crypto/x509".ExtKeyUsage; UnknownExtKeyUsage []@"encoding/asn1".ObjectIdentifier; BasicConstraintsValid bool; IsCA bool; MaxPathLen int; MaxPathLenZero bool; SubjectKeyId []byte; AuthorityKeyId []byte; OCSPServer []string; IssuingCertificateURL []string; DNSNames []string; EmailAddresses []string; IPAddresses []@"net".IP; PermittedDNSDomainsCritical bool; PermittedDNSDomains []string; CRLDistributionPoints []string; PolicyIdentifiers []@"encoding/asn1".ObjectIdentifier } + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckCRLSignature (@"crypto/x509".crl·3 *@"crypto/x509/pkix".CertificateList) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignature (@"crypto/x509".algo·3 @"crypto/x509".SignatureAlgorithm, @"crypto/x509".signed·4 []byte, @"crypto/x509".signature·5 []byte) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignatureFrom (@"crypto/x509".parent·3 *@"crypto/x509".Certificate) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) CreateCRL (@"crypto/x509".rand·4 @"io".Reader, @"crypto/x509".priv·5 interface {}, @"crypto/x509".revokedCerts·6 []@"crypto/x509/pkix".RevokedCertificate, @"crypto/x509".now·7 @"time".Time, @"crypto/x509".expiry·8 @"time".Time) (@"crypto/x509".crlBytes·1 []byte, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x0") Equal (@"crypto/x509".other·3 *@"crypto/x509".Certificate "esc:0x0") (? bool) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) Verify (@"crypto/x509".opts·4 @"crypto/x509".VerifyOptions "esc:0x4") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") VerifyHostname (@"crypto/x509".h·3 string "esc:0x2") (? 
error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) @"crypto/x509".buildChains (@"crypto/x509".cache·4 map[int][][]*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".currentChain·5 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·6 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") @"crypto/x509".isValid (@"crypto/x509".certType·3 int, @"crypto/x509".currentChain·4 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·5 *@"crypto/x509".VerifyOptions "esc:0x0") (? error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate "esc:0x0") @"crypto/x509".systemVerify (@"crypto/x509".opts·4 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) { return nil, nil } + type @"crypto/tls".ConnectionState struct { Version uint16; HandshakeComplete bool; DidResume bool; CipherSuite uint16; NegotiatedProtocol string; NegotiatedProtocolIsMutual bool; ServerName string; PeerCertificates []*@"crypto/x509".Certificate; VerifiedChains [][]*@"crypto/x509".Certificate; TLSUnique []byte } + type @"net/http".Cookie struct { Name string; Value string; Path string; Domain string; Expires @"time".Time; RawExpires string; MaxAge int; Secure bool; HttpOnly bool; Raw string; Unparsed []string } + func (@"net/http".c·2 *@"net/http".Cookie) String () (? string) + import bufio "bufio" // indirect + type @"bufio".Reader struct { @"bufio".buf []byte; @"bufio".rd @"io".Reader; @"bufio".r int; @"bufio".w int; @"bufio".err error; @"bufio".lastByte int; @"bufio".lastRuneSize int } + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") Buffered () (? int) { return @"bufio".b·2.@"bufio".w - @"bufio".b·2.@"bufio".r } + func (@"bufio".b·3 *@"bufio".Reader) Peek (@"bufio".n·4 int) (? []byte, ? 
error) + func (@"bufio".b·3 *@"bufio".Reader) Read (@"bufio".p·4 []byte) (@"bufio".n·1 int, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadByte () (@"bufio".c·1 byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadBytes (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadLine () (@"bufio".line·1 []byte, @"bufio".isPrefix·2 bool, @"bufio".err·3 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadRune () (@"bufio".r·1 rune, @"bufio".size·2 int, @"bufio".err·3 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadSlice (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadString (@"bufio".delim·4 byte) (@"bufio".line·1 string, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) Reset (@"bufio".r·2 @"io".Reader) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadByte () (? error) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadRune () (? error) { if @"bufio".b·2.@"bufio".lastRuneSize < 0x0 || @"bufio".b·2.@"bufio".r < @"bufio".b·2.@"bufio".lastRuneSize { return @"bufio".ErrInvalidUnreadRune }; @"bufio".b·2.@"bufio".r -= @"bufio".b·2.@"bufio".lastRuneSize; @"bufio".b·2.@"bufio".lastByte = -0x1; @"bufio".b·2.@"bufio".lastRuneSize = -0x1; return nil } + func (@"bufio".b·3 *@"bufio".Reader) WriteTo (@"bufio".w·4 @"io".Writer) (@"bufio".n·1 int64, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) @"bufio".fill () + func (@"bufio".b·2 *@"bufio".Reader "esc:0x1") @"bufio".readErr () (? 
error) { var @"bufio".err·3 error; ; @"bufio".err·3 = @"bufio".b·2.@"bufio".err; @"bufio".b·2.@"bufio".err = nil; return @"bufio".err·3 } + func (@"bufio".b·1 *@"bufio".Reader "esc:0x0") @"bufio".reset (@"bufio".buf·2 []byte, @"bufio".r·3 @"io".Reader) { *@"bufio".b·1 = (@"bufio".Reader{ @"bufio".buf:@"bufio".buf·2, @"bufio".rd:@"bufio".r·3, @"bufio".lastByte:-0x1, @"bufio".lastRuneSize:-0x1 }) } + func (@"bufio".b·3 *@"bufio".Reader) @"bufio".writeBuf (@"bufio".w·4 @"io".Writer) (? int64, ? error) + import bytes "bytes" // indirect + type @"bytes".readOp int + type @"bytes".Buffer struct { @"bytes".buf []byte; @"bytes".off int; @"bytes".runeBytes [4]byte; @"bytes".bootstrap [64]byte; @"bytes".lastRead @"bytes".readOp } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Bytes () (? []byte) { return @"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:] } + func (@"bytes".b·1 *@"bytes".Buffer) Grow (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") Len () (? int) { return len(@"bytes".b·2.@"bytes".buf) - @"bytes".b·2.@"bytes".off } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Next (@"bytes".n·3 int) (? []byte) + func (@"bytes".b·3 *@"bytes".Buffer) Read (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadByte () (@"bytes".c·1 byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadBytes (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadFrom (@"bytes".r·4 @"io".Reader) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·4 *@"bytes".Buffer) ReadRune () (@"bytes".r·1 rune, @"bytes".size·2 int, @"bytes".err·3 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadString (@"bytes".delim·4 byte) (@"bytes".line·1 string, @"bytes".err·2 error) + func (@"bytes".b·1 *@"bytes".Buffer) Reset () + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") String () (? 
string) { if @"bytes".b·2 == nil { return "" }; return string(@"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:]) } + func (@"bytes".b·1 *@"bytes".Buffer) Truncate (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadByte () (? error) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadRune () (? error) + func (@"bytes".b·3 *@"bytes".Buffer) Write (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) WriteByte (@"bytes".c·3 byte) (? error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteRune (@"bytes".r·4 rune) (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteString (@"bytes".s·4 string "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteTo (@"bytes".w·4 @"io".Writer) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) @"bytes".grow (@"bytes".n·3 int) (? int) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x1") @"bytes".readSlice (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + type @"mime/multipart".Part struct { Header @"net/textproto".MIMEHeader; @"mime/multipart".buffer *@"bytes".Buffer; @"mime/multipart".mr *@"mime/multipart".Reader; @"mime/multipart".bytesRead int; @"mime/multipart".disposition string; @"mime/multipart".dispositionParams map[string]string; @"mime/multipart".r @"io".Reader } + func (@"mime/multipart".p·2 *@"mime/multipart".Part) Close () (? error) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FileName () (? string) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FormName () (? 
string) + func (@"mime/multipart".p·3 *@"mime/multipart".Part) Read (@"mime/multipart".d·4 []byte) (@"mime/multipart".n·1 int, @"mime/multipart".err·2 error) + func (@"mime/multipart".p·1 *@"mime/multipart".Part "esc:0x0") @"mime/multipart".parseContentDisposition () + func (@"mime/multipart".bp·2 *@"mime/multipart".Part) @"mime/multipart".populateHeaders () (? error) + type @"mime/multipart".Reader struct { @"mime/multipart".bufReader *@"bufio".Reader; @"mime/multipart".currentPart *@"mime/multipart".Part; @"mime/multipart".partsRead int; @"mime/multipart".nl []byte; @"mime/multipart".nlDashBoundary []byte; @"mime/multipart".dashBoundaryDash []byte; @"mime/multipart".dashBoundary []byte } + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) NextPart () (? *@"mime/multipart".Part, ? error) + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) ReadForm (@"mime/multipart".maxMemory·4 int64) (@"mime/multipart".f·1 *@"mime/multipart".Form, @"mime/multipart".err·2 error) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader) @"mime/multipart".isBoundaryDelimiterLine (@"mime/multipart".line·3 []byte "esc:0x0") (@"mime/multipart".ret·1 bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".isFinalBoundary (@"mime/multipart".line·3 []byte "esc:0x0") (? bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".peekBufferIsEmptyPart (@"mime/multipart".peek·3 []byte "esc:0x0") (? 
bool) + type @"net/http".Request struct { Method string; URL *@"net/url".URL; Proto string; ProtoMajor int; ProtoMinor int; Header @"net/http".Header; Body @"io".ReadCloser; ContentLength int64; TransferEncoding []string; Close bool; Host string; Form @"net/url".Values; PostForm @"net/url".Values; MultipartForm *@"mime/multipart".Form; Trailer @"net/http".Header; RemoteAddr string; RequestURI string; TLS *@"crypto/tls".ConnectionState } + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") AddCookie (@"net/http".c·2 *@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request "esc:0x0") BasicAuth () (@"net/http".username·1 string, @"net/http".password·2 string, @"net/http".ok·3 bool) + func (@"net/http".r·3 *@"net/http".Request "esc:0x0") Cookie (@"net/http".name·4 string "esc:0x0") (? *@"net/http".Cookie, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Cookies () (? []*@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request) FormFile (@"net/http".key·5 string "esc:0x0") (? @"mime/multipart".File, ? *@"mime/multipart".FileHeader, ? error) + func (@"net/http".r·2 *@"net/http".Request) FormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·3 *@"net/http".Request) MultipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request) ParseForm () (? error) + func (@"net/http".r·2 *@"net/http".Request) ParseMultipartForm (@"net/http".maxMemory·3 int64) (? error) + func (@"net/http".r·2 *@"net/http".Request) PostFormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") ProtoAtLeast (@"net/http".major·3 int, @"net/http".minor·4 int) (? bool) { return @"net/http".r·2.ProtoMajor > @"net/http".major·3 || @"net/http".r·2.ProtoMajor == @"net/http".major·3 && @"net/http".r·2.ProtoMinor >= @"net/http".minor·4 } + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Referer () (? 
string) + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") SetBasicAuth (@"net/http".username·2 string "esc:0x0", @"net/http".password·3 string "esc:0x0") + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") UserAgent () (? string) + func (@"net/http".r·2 *@"net/http".Request) Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·2 *@"net/http".Request) WriteProxy (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·1 *@"net/http".Request) @"net/http".closeBody () + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".expectsContinue () (? bool) + func (@"net/http".r·3 *@"net/http".Request) @"net/http".multipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsClose () (? bool) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsHttp10KeepAlive () (? bool) + func (@"net/http".req·2 *@"net/http".Request) @"net/http".write (@"net/http".w·3 @"io".Writer, @"net/http".usingProxy·4 bool, @"net/http".extraHeaders·5 @"net/http".Header "esc:0x0") (? error) + func @"".Set (@"".r·1 *@"net/http".Request, @"".key·2 interface {}, @"".val·3 interface {}) + func @"".Get (@"".r·2 *@"net/http".Request "esc:0x0", @"".key·3 interface {} "esc:0x0") (? interface {}) + func @"".GetOk (@"".r·3 *@"net/http".Request "esc:0x0", @"".key·4 interface {} "esc:0x0") (? interface {}, ? bool) + func @"".GetAll (@"".r·2 *@"net/http".Request "esc:0x0") (? map[interface {}]interface {}) + func @"".GetAllOk (@"".r·3 *@"net/http".Request "esc:0x0") (? map[interface {}]interface {}, ? bool) + func @"".Delete (@"".r·1 *@"net/http".Request "esc:0x0", @"".key·2 interface {} "esc:0x0") + func @"".Clear (@"".r·1 *@"net/http".Request "esc:0x0") + func @"".Purge (@"".maxAge·2 int) (? int) + type @"net/http".ResponseWriter interface { Header() (? @"net/http".Header); Write(? []byte) (? int, ? error); WriteHeader(? int) } + type @"net/http".Handler interface { ServeHTTP(? 
@"net/http".ResponseWriter, ? *@"net/http".Request) } + func @"".ClearHandler (@"".h·2 @"net/http".Handler) (? @"net/http".Handler) + func @"".init () + var @"time".months [12]string + var @"time".days [7]string + var @"time".Local *@"time".Location + var @"time".UTC *@"time".Location + var @"bufio".ErrInvalidUnreadRune error + +$$ +_go_.6 0 0 0 644 49111 ` +go object darwin amd64 go1.4.2 X:precisestack + +! +go13ldnet/http.a sync.a time.aþ "".SetÀ¢eH‹ %H;awèëêHƒìpHH‰$èH‹D$xHH‰$H‹H‰\$H‰D$èH‹\$H‹1íH9ë…iH‹\$xH‰\$0HH‰$HÇD$èH‹\$H‰\$(HH‰$H‹H‰\$H\$0H‰\$H\$(H‰\$èH‹\$xH‰\$0èH‹$‹L$H‹D$‰L$`H‰D$hH‰\$XH½ nˆñÿÿÿHëH‰\$ HH‰$H‹H‰\$H\$0H‰\$H\$ H‰\$èH‹D$xHH‰$H‹H‰\$H‰D$èH‹\$H‹+H‹œ$€H‰\$HH‹œ$ˆH‰\$PH‹œ$H‰\$8H‹œ$˜H‰\$@HH‰$H‰l$H\$HH‰\$H\$8H‰\$èHH‰$èHƒÄpÃé[ÿÿÿ. + 0runtime.morestack_noctxt:"".mutexL(sync.(*RWMutex).Lockdptype.map[*net/http.Request]map[interface {}]interface {}z"".data˜2runtime.mapaccess1_fast64àDtype.map[interface {}]interface {}„runtime.makemap¦ptype.map[*net/http.Request]map[interface {}]interface {}¼"".dataø$runtime.mapassign1–time.Nowþ@type.map[*net/http.Request]int64”"".datatÐ$runtime.mapassign1èptype.map[*net/http.Request]map[interface {}]interface {}þ"".dataœ2runtime.mapaccess1_fast64¢Dtype.map[interface {}]interface {}æ$runtime.mapassign1ô"".mutex†,sync.(*RWMutex).UnlockPà"".autotmp_0011o"type.interface {}"".autotmp_0010O"type.interface {}"".autotmp_0009Dtype.map[interface {}]interface {}"".autotmp_0008,type.*net/http.Request"".autotmp_0007Ÿtype.int64"".autotmp_0005,type.*net/http.Request"".autotmp_0004Dtype.map[interface {}]interface {}"".autotmp_0003,type.*net/http.Requesttime.t·2/type.time.Time "".val0"type.interface {} "".key"type.interface {}"".r,type.*net/http.Requestà±ßàà(*9]l‹$%\:ƒeTgclocals·1245bf52b89d91ace0efa42912a5da62Tgclocals·341ab1f3c66e663a93f22f10d9eab46d¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ "".GetÀ¨eH‹ 
%H;awèëêHƒì@HÇD$`HÇD$hHH‰$èH‹D$HHH‰$H‹H‰\$H‰D$èH‹\$H‹1íH9èt|H‹\$PH‰\$0H‹\$XH‰\$8HH‰$H‰D$H\$0H‰\$èH‹\$Hƒût:H‹ H‹kH‰L$ H‰l$(HH‰$èH‹\$ H‰\$`H‹\$(H‰\$hHƒÄ@ÉëÂHH‰$èHÇD$`HÇD$hHƒÄ@à + 0runtime.morestack_noctxt^"".mutexp*sync.(*RWMutex).RLockˆptype.map[*net/http.Request]map[interface {}]interface {}ž"".data¼2runtime.mapaccess1_fast64Dtype.map[interface {}]interface {}À$runtime.mapaccess1†"".mutex˜.sync.(*RWMutex).RUnlockà"".mutexò.sync.(*RWMutex).RUnlockP€ +"".autotmp_0015"type.interface {}"".value?"type.interface {} "".~r20"type.interface {} "".key"type.interface {}"".r,type.*net/http.Request€Î€*  &@,5O#7h,-(Tgclocals·473d4314ba155bc5d9af9ad66f1c242aTgclocals·4ac5edf299a8cadc46c808258e6fc6d0¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".GetOk€èeH‹ %H;awèëêHƒìPHÇD$pHÇD$xHH‰$èH‹D$XHH‰$H‹H‰\$H‰D$èH‹L$¶\$ Hƒù„÷€û„¿H‹D$XHH‰$H‹H‰\$H‰D$èH‹\$H‹+H‹\$`H‰\$@H‹\$hH‰\$HHH‰$H‰l$H\$@H‰\$èH‹D$¶\$ ˆ\$/HƒøtFH‹(H‰l$0H‹hH‰l$8HH‰$èH‹\$0H‰\$pH‹\$8H‰\$x¶\$/ˆœ$€HƒÄPÉë¶HH‰$èHÇD$pHÇD$xÆ„$€HƒÄPÉéÿÿÿ + 0runtime.morestack_noctxt^"".mutexp*sync.(*RWMutex).RLockˆptype.map[*net/http.Request]map[interface {}]interface {}ž"".data¼2runtime.mapaccess2_fast64Žptype.map[*net/http.Request]map[interface {}]interface {}¤"".dataÂ2runtime.mapaccess1_fast64ˆDtype.map[interface {}]interface {}¸$runtime.mapaccess2"".mutex¢.sync.(*RWMutex).RUnlock‚"".mutex”.sync.(*RWMutex).RUnlock` "".autotmp_0020"type.interface {}"".autotmp_0018,type.*net/http.Request +"".okAtype.bool"".value?"type.interface {} "".~r3Ptype.bool "".~r20"type.interface {} "".key"type.interface {}"".r,type.*net/http.Request& ŸŸ 2Ÿ €,X,C†% 7¤597Tgclocals·a5bd3918675a24472245dfa9083d6deeTgclocals·4ac5edf299a8cadc46c808258e6fc6d0¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".GetAll€úeH‹ %HD$¨H;AwèëåHìØHH‰$èH‹„$àHH‰$H‹H‰\$H‰D$èH‹L$¶\$ 
H‹)€û„hH‰ëH‰l$0HƒýtH‹H‰ØHH‰$H‰D$èH‹\$H‰\$(H‹L$0H¼$ˆ1ÀèHH‰$H‰L$Hœ$ˆH‰\$èH‹œ$ˆ1íH9넹H‹œ$Hƒû„ÓH‹ H‹CH‹œ$ˆHƒû„³H‹3H‹kH‰L$XH‰D$`H‰t$HH‰t$xH‰l$PH‰¬$€H‰L$8H‰L$hH‰D$@H‰D$pHH‰$H‹\$(H‰\$H\$xH‰\$H\$hH‰\$èHœ$ˆH‰$èH‹œ$ˆ1íH9ë…GÿÿÿHH‰$èH‹\$(H‰œ$èHÄØÉéFÿÿÿ‰é&ÿÿÿHH‰$èHÇ„$èHÄØÃ& +*0runtime.morestack_noctxtJ"".mutex\*sync.(*RWMutex).RLockzptype.map[*net/http.Request]map[interface {}]interface {}"".data®2runtime.mapaccess2_fast64Dtype.map[interface {}]interface {}¬runtime.makemapèØ runtime.duffzeroöDtype.map[interface {}]interface {}¬&runtime.mapiterinit®Dtype.map[interface {}]interface {}ü$runtime.mapassign1ž&runtime.mapiternextÒ"".mutexä.sync.(*RWMutex).RUnlock¸"".mutexÊ.sync.(*RWMutex).RUnlock °"".autotmp_0031ÿ"type.interface {}"".autotmp_0029ß"type.interface {}"".autotmp_0028¿"type.interface {}"".autotmp_0027ŸNtype.map.iter[interface {}]interface {}"".autotmp_0026Dtype.map[interface {}]interface {}"".v¿"type.interface {}"".kŸ"type.interface {}"".resultßDtype.map[interface {}]interface {}"".contextÏDtype.map[interface {}]interface {} "".~r1Dtype.map[interface {}]interface {}"".r,type.*net/http.Request "°¨¯°1¯€4p"?3…Y$ $-h@¨#3Tgclocals·7ba969af8c72fca351526f5bd553df36Tgclocals·af2d3162727927f9557359011162d41c¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".GetAllOkàÆeH‹ %HD$ H;AwèëåHìàHH‰$èH‹„$èHH‰$H‹H‰\$H‰D$èH‹D$¶\$ ˆ\$/H‹H‰\$8HƒûtH‹H‰ØHH‰$H‰D$èH‹\$H‰\$0H‹L$8H¼$1ÀèHH‰$H‰L$Hœ$H‰\$èH‹œ$1íH9ë„¿H‹œ$˜Hƒû„åH‹ H‹CH‹œ$Hƒû„ÅH‹3H‹kH‰L$`H‰D$hH‰t$PH‰´$€H‰l$XH‰¬$ˆH‰L$@H‰L$pH‰D$HH‰D$xHH‰$H‹\$0H‰\$Hœ$€H‰\$H\$pH‰\$èHœ$H‰$èH‹œ$1íH9ë…AÿÿÿHH‰$èH‹\$0H‰œ$ð¶\$/ˆœ$øHÄàÉé4ÿÿÿ‰éÿÿÿ" +*0runtime.morestack_noctxtJ"".mutex\*sync.(*RWMutex).RLockzptype.map[*net/http.Request]map[interface {}]interface {}"".data®2runtime.mapaccess2_fast64€Dtype.map[interface {}]interface {}œruntime.makemapØØ runtime.duffzeroæDtype.map[interface {}]interface {}œ&runtime.mapiterinit¤Dtype.map[interface {}]interface 
{}ø$runtime.mapassign1š&runtime.mapiternextÎ"".mutexà.sync.(*RWMutex).RUnlock0À"".autotmp_0040ÿ"type.interface {}"".autotmp_0038ß"type.interface {}"".autotmp_0037¿"type.interface {}"".autotmp_0036ŸNtype.map.iter[interface {}]interface {}"".autotmp_0035Dtype.map[interface {}]interface {}"".v¿"type.interface {}"".kŸ"type.interface {}"".resultßDtype.map[interface {}]interface {} +"".okátype.bool"".contextÏDtype.map[interface {}]interface {} "".~r2 type.bool "".~r1Dtype.map[interface {}]interface {}"".r,type.*net/http.Request"À²¿Àð.":0…_$! -`@®#ATgclocals·b46c7a32cd3cbdb99d262657bbb5cb46Tgclocals·af2d3162727927f9557359011162d41c¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".DeleteÀ®eH‹ %H;awèëêHƒì0HH‰$èH‹D$8HH‰$H‹H‰\$H‰D$èH‹\$H‹1íH9ëtvH‹D$8HH‰$H‹H‰\$H‰D$èH‹\$H‹+H‹\$@H‰\$ H‹\$HH‰\$(HH‰$H‰l$H\$ H‰\$èHH‰$èHƒÄ0Ãëé + 0runtime.morestack_noctxt:"".mutexL(sync.(*RWMutex).Lockdptype.map[*net/http.Request]map[interface {}]interface {}z"".data˜2runtime.mapaccess1_fast64Îptype.map[*net/http.Request]map[interface {}]interface {}ä"".data‚2runtime.mapaccess1_fast64ÈDtype.map[interface {}]interface {}ø"runtime.mapdelete†"".mutex˜,sync.(*RWMutex).Unlock0`"".autotmp_0045"type.interface {} "".key"type.interface {}"".r,type.*net/http.Request`º_` à ¨5a %–Tgclocals·e6f65f4d24a282ff6c0e03aa9f867bfeTgclocals·d9578cf05e73f94c5bc1acfa30cff71f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".ClearàÈeH‹ %H;awèëêHƒì(HH‰$èH‹D$0H‰D$H‰D$ HH‰$H‹H‰\$H\$ H‰\$èH‹\$H‰\$ HH‰$H‹H‰\$H\$ H‰\$èHH‰$èHƒÄ(à + 0runtime.morestack_noctxt:"".mutexL(sync.(*RWMutex).Lockxptype.map[*net/http.Request]map[interface {}]interface 
{}Ž"".data¶"runtime.mapdeleteØ@type.map[*net/http.Request]int64î"".datat–"runtime.mapdelete¤"".mutex¶,sync.(*RWMutex).UnlockP"".autotmp_0047,type.*net/http.Request"".autotmp_0046,type.*net/http.Request"".r,type.*net/http.Request"".r,type.*net/http.RequestP‰O °Àe%50Tgclocals·15395a9df917b4c9aa74d5c6c7e1ebf4Tgclocals·ab0f5354c6e12d990f77504eee3efe59¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".clear€þeH‹ %H;awèëêHƒì H‹\$(H‰\$HH‰$H‹H‰\$H\$H‰\$èH‹\$(H‰\$HH‰$H‹H‰\$H\$H‰\$èHƒÄ à + 0runtime.morestack_noctxtNptype.map[*net/http.Request]map[interface {}]interface {}d"".dataŒ"runtime.mapdelete®@type.map[*net/http.Request]int64Ä"".datatì"runtime.mapdelete@"".autotmp_0049,type.*net/http.Request"".autotmp_0048,type.*net/http.Request"".r,type.*net/http.Request@d?€Î00 +E;Tgclocals·ac5bea9c8a91f5fb1d31bdacc5067b57Tgclocals·e1ae6533a9e39048ba0735a2264ce16a¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".Purge  Ž eH‹ %HD$ÐH;AwèëåHì°HH‰$èHÇD$(H‹œ$¸Hƒû H‹HƒûtH‹H‰\$(HH‰$HÇD$èH‹D$HH‰$H‰D$èHH‰$HÇD$èH‹D$HH‰$H‰D$èHH‰$èH‹\$(H‰œ$ÀHÄ°ÃèH‹$‹L$H‹D$‰L$PH‰D$XH‰\$HH½ nˆñÿÿÿHëH‹¬$¸H)ëH‰\$ H‹ H|$`1ÀèHH‰$H‰L$H\$`H‰\$èH‹\$`1íH9ë„ÔH‹\$`H‹+H‰l$0HH‰$H‹H‰\$H‰l$èH‹\$H‹H‹l$ H9ë•H‹D$0H‰D$8H‰D$@HH‰$H‹H‰\$H\$@H‰\$èH‹\$8H‰\$@HH‰$H‹H‰\$H\$@H‰\$èH‹\$(HÿÃH‰\$(H\$`H‰$èH‹\$`1íH9ë…,ÿÿÿéƒþÿÿëÛ< +*0runtime.morestack_noctxtJ"".mutex\(sync.(*RWMutex).Lock "".dataÊptype.map[*net/http.Request]map[interface {}]interface {}îruntime.makemap†"".data¢.runtime.writebarrierptr°@type.map[*net/http.Request]int64Ôruntime.makemapì"".datatˆ.runtime.writebarrierptr–"".mutex¨,sync.(*RWMutex).UnlockÜtime.NowÚ"".dataòØ runtime.duffzero€ptype.map[*net/http.Request]map[interface {}]interface 
{}°&runtime.mapiterinitø@type.map[*net/http.Request]int64Ž"".datat¬2runtime.mapaccess1_fast64„ptype.map[*net/http.Request]map[interface {}]interface {}š"".dataÂ"runtime.mapdeleteä@type.map[*net/http.Request]int64ú"".datat¢"runtime.mapdeleteØ&runtime.mapiternext à"".autotmp_0060type.int"".autotmp_0059,type.*net/http.Request"".autotmp_0058ß,type.*net/http.Request"".autotmp_0057type.int64"".autotmp_0055Ÿztype.map.iter[*net/http.Request]map[interface {}]interface {}"".autotmp_0054ptype.map[*net/http.Request]map[interface {}]interface {}"".rï,type.*net/http.Requesttime.t·2Ïtype.time.Time"".rÿ,type.*net/http.Request "".minŸtype.int64"".counttype.int "".~r1type.int"".maxAgetype.int"àÊßàãÐHê" 33=J<e   -ª>K0%Tgclocals·844ab127e744730859f0523d83a2c61dTgclocals·d7104e385b089a9888fc486f3f965182¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".ClearHandler€òeH‹ %H;awèëêHƒì8HH‰$èH‹|$H‰|$0Hl$@H‰îH¥H¥HÇD$PHÇD$XHH‰$èH‹D$H-H‰(H‰D$ H‰$Hƒ<$toHƒ$H‹\$0H‰\$èH‹\$ H‰\$(H‹1íH9ètH‹\$(H‰\$XH‰D$PHƒÄ8ÃHH‰$HH‰\$HH‰\$èH‹D$뽉%ëˆ + 0runtime.morestack_noctxt:*type.net/http.HandlerL"runtime.newobjectª^type.struct { F uintptr; A0 *net/http.Handler }¼"runtime.newobjectÔ"".func·001¢.runtime.writebarrierptrÄZgo.itab.net/http.HandlerFunc.net/http.Handlerˆ2type.net/http.HandlerFuncž*type.net/http.Handler¶Zgo.itab.net/http.HandlerFunc.net/http.HandlerÊ runtime.typ2Itab@p"".autotmp_0063/`type.*struct { F uintptr; A0 *net/http.Handler }"".autotmp_00622type.net/http.HandlerFunc +"".&h,type.*net/http.Handler "".~r1 *type.net/http.Handlerp¦op?€–R®%83TTgclocals·2532067b4c791f17d2eaa4ddadeea3c0Tgclocals·f71d58207dae87d05175ac11727cdd3b¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".func·001À¬eH‹ %H;awèëêHƒì8H‹ZH‰\$ H‹\$PH‰$H QjèYYH…ÀuIH‹\$ H‹H‹kH‹\$@H‰\$H‹\$HH‰\$H‹\$PH‰\$H‰l$0H‰,$H‰T$(H‹Z 
ÿÓèHƒÄ8ÃèHƒÄ8à + "runtime.morestack^"".Clear·fn"runtime.deferprocü +„&runtime.deferreturnš&runtime.deferreturn0p +"".&h/,type.*net/http.Handler"".r ,type.*net/http.Request"".w8type.net/http.ResponseWriter&pMop +o  ˜#> 6H"Tgclocals·268041cca0e36eeedf29dd117f06a485Tgclocals·61e2515c69061b8fed0e66ece719f936¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ"".init ŒeH‹ %H;awèëêHƒì¶€ût¶€ûuHƒÄÃè ÆèèèHH‰$HÇD$èH‹D$HH‰$H‰D$èHH‰$HÇD$èH‹D$HH‰$H‰D$èÆHƒÄÃ$ + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·Œtime.init–sync.init net/http.init®ptype.map[*net/http.Request]map[interface {}]interface {}Òruntime.makemapê"".data†.runtime.writebarrierptr”@type.map[*net/http.Request]int64¸runtime.makemapÐ"".datatì.runtime.writebarrierptrø"".initdone·00/0Ž/ Tf¦T…33„ 7™Tgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·3280bececceccd33cb74587feedb1f9f®/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ4type..hash.[8]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0071type.int"".autotmp_0070type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[8]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþ0type..eq.[8]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ 
H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0075?"type.interface {}"".autotmp_0074"type.interface {}"".autotmp_0073_type.int"".autotmp_0072Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[8]interface {}"".p*type.*[8]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¶/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.goþTgclocals·341ab1f3c66e663a93f22f10d9eab46d00 +ðþTgclocals·1245bf52b89d91ace0efa42912a5da6200 +þþþþþTgclocals·4ac5edf299a8cadc46c808258e6fc6d0((ðþTgclocals·473d4314ba155bc5d9af9ad66f1c242a(( +>>>þTgclocals·4ac5edf299a8cadc46c808258e6fc6d0((ðþTgclocals·a5bd3918675a24472245dfa9083d6dee(( >>>þTgclocals·af2d3162727927f9557359011162d41c``,ªZÿªZþTgclocals·7ba969af8c72fca351526f5bd553df3688þTgclocals·af2d3162727927f9557359011162d41c``,ªZÿªZþTgclocals·b46c7a32cd3cbdb99d262657bbb5cb4688þTgclocals·d9578cf05e73f94c5bc1acfa30cff71f þTgclocals·e6f65f4d24a282ff6c0e03aa9f867bfe >>þTgclocals·ab0f5354c6e12d990f77504eee3efe59(( +þTgclocals·15395a9df917b4c9aa74d5c6c7e1ebf4((þTgclocals·e1ae6533a9e39048ba0735a2264ce16a þTgclocals·ac5bea9c8a91f5fb1d31bdacc5067b57 þTgclocals·d7104e385b089a9888fc486f3f96518288  ªU ªU( ªU  ªUþTgclocals·844ab127e744730859f0523d83a2c61d88þ,Zgo.itab.net/http.HandlerFunc.net/http.HandlerþTgclocals·f71d58207dae87d05175ac11727cdd3b00 þTgclocals·2532067b4c791f17d2eaa4ddadeea3c000 þTgclocals·61e2515c69061b8fed0e66ece719f936 þTgclocals·268041cca0e36eeedf29dd117f06a485 ++þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·3280bececceccd33cb74587feedb1f9fþ,"".mutex0"type.sync.RWMutexþ*"".dataptype.map[*net/http.Request]map[interface {}]interface {}þ*"".datat@type.map[*net/http.Request]int64þ,"".initdone·type.uint8þ"".Set·f 
"".Setþ.sync.(*RWMutex).Lock·f(sync.(*RWMutex).Lockþ8runtime.mapaccess1_fast64·f2runtime.mapaccess1_fast64þ$runtime.makemap·fruntime.makemapþ*runtime.mapassign1·f$runtime.mapassign1þtime.Now·ftime.Nowþ2sync.(*RWMutex).Unlock·f,sync.(*RWMutex).Unlockþ"".Get·f "".Getþ0sync.(*RWMutex).RLock·f*sync.(*RWMutex).RLockþ*runtime.mapaccess1·f$runtime.mapaccess1þ4sync.(*RWMutex).RUnlock·f.sync.(*RWMutex).RUnlockþ,runtime.throwreturn·f&runtime.throwreturnþ"".GetOk·f"".GetOkþ8runtime.mapaccess2_fast64·f2runtime.mapaccess2_fast64þ*runtime.mapaccess2·f$runtime.mapaccess2þ"".GetAll·f"".GetAllþ,runtime.mapiterinit·f&runtime.mapiterinitþ,runtime.mapiternext·f&runtime.mapiternextþ"".GetAllOk·f"".GetAllOkþ"".Delete·f"".Deleteþ(runtime.mapdelete·f"runtime.mapdeleteþ"".Clear·f"".Clearþ"".clear·f"".clearþ"".Purge·f"".Purgeþ4runtime.writebarrierptr·f.runtime.writebarrierptrþ$"".ClearHandler·f"".ClearHandlerþ(runtime.newobject·f"runtime.newobjectþ"".func·001·f"".func·001þ&runtime.typ2Itab·f runtime.typ2Itabþ(runtime.deferproc·f"runtime.deferprocþ,runtime.deferreturn·f&runtime.deferreturnþ"".init·f"".initþ(runtime.throwinit·f"runtime.throwinitþtime.init·ftime.initþsync.init·fsync.initþ net/http.init·fnet/http.initþbruntime.gcbits.0xcc000000000000000000000000000000 Ìþ0go.string."interface {}"@: interface {} 0go.string."interface {}"þ"type.interface {}ÀÀçW  € runtime.algarray0bruntime.gcbits.0xcc000000000000000000000000000000P0go.string."interface {}"p4go.weak.type.*interface {}€"runtime.zerovalueÀ"type.interface {}þbruntime.gcbits.0x48844400000000000000000000000000 H„Dþ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface 
{}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.typelink.[]uintptr/[]uintptrtype.[]uintptrþ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþ\go.string."map.iter[interface {}]interface {}"pf"map.iter[interface {}]interface {} \go.string."map.iter[interface {}]interface {}"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."t"0$t go.string."t"þgo.string."h"0$h go.string."h"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þNtype.map.iter[interface {}]interface {}ððPe@˜ (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000P\go.string."map.iter[interface {}]interface {}"p`go.weak.type.*map.iter[interface {}]interface {}€"runtime.zerovalueÀNtype.map.iter[interface {}]interface {}Àgo.string."key"à$type.*interface {}go.string."val"°$type.*interface {}àgo.string."t"€type.*uint8°go.string."h"ÐNtype.*map.hdr[interface {}]interface {}€&go.string."buckets" Ttype.*map.bucket[interface {}]interface {}Ð go.string."bptr"ðTtype.*map.bucket[interface {}]interface {} "go.string."other"Àtype.[4]uintptrþ4go.string."**http.Request"@>**http.Request 4go.string."**http.Request"þ.type.**net/http.Request  "g·6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."**http.Request"p@go.weak.type.***net/http.Request€"runtime.zerovalue,type.*net/http.Requestþ€go.string."*map.hdr[*http.Request]map[interface {}]interface {}"Š4*map.hdr[*http.Request]map[interface {}]interface {} €go.string."*map.hdr[*http.Request]map[interface {}]interface 
{}"þztype.*map.hdr[*net/http.Request]map[interface {}]interface {}  ýG¯®6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P€go.string."*map.hdr[*http.Request]map[interface {}]interface {}"pŒgo.weak.type.**map.hdr[*net/http.Request]map[interface {}]interface {}€"runtime.zerovaluextype.map.hdr[*net/http.Request]map[interface {}]interface {}þ€go.string."map.iter[*http.Request]map[interface {}]interface {}"Š4map.iter[*http.Request]map[interface {}]interface {} €go.string."map.iter[*http.Request]map[interface {}]interface {}"þztype.map.iter[*net/http.Request]map[interface {}]interface {}ððPäÞt (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000P€go.string."map.iter[*http.Request]map[interface {}]interface {}"pŒgo.weak.type.*map.iter[*net/http.Request]map[interface {}]interface {}€"runtime.zerovalueÀztype.map.iter[*net/http.Request]map[interface {}]interface {}Àgo.string."key"à.type.**net/http.Requestgo.string."val"°Ftype.*map[interface {}]interface {}àgo.string."t"€type.*uint8°go.string."h"Ðztype.*map.hdr[*net/http.Request]map[interface {}]interface {}€&go.string."buckets" €type.*map.bucket[*net/http.Request]map[interface {}]interface {}Ð go.string."bptr"ð€type.*map.bucket[*net/http.Request]map[interface {}]interface {} "go.string."other"Àtype.[4]uintptrþbruntime.gcbits.0x84000000000000000000000000000000 „þdgo.string."struct { F uintptr; A0 *http.Handler }"pn&struct { F uintptr; A0 *http.Handler } dgo.string."struct { F uintptr; A0 *http.Handler }"þgo.string."F"0$F go.string."F"þgo.string."A0"0&A0 go.string."A0"þ^type.struct { F uintptr; A0 *net/http.Handler }àà¢åK À runtime.algarray0bruntime.gcbits.0x84000000000000000000000000000000Pdgo.string."struct { F uintptr; A0 *http.Handler }"ppgo.weak.type.*struct { F uintptr; A0 *net/http.Handler }€"runtime.zerovalueÀ^type.struct { F uintptr; A0 *net/http.Handler }Àgo.string."F"àtype.uintptrgo.string."A0"°,type.*net/http.Handlerþfgo.string."*struct { F uintptr; A0 *http.Handler 
}"pp'*struct { F uintptr; A0 *http.Handler } fgo.string."*struct { F uintptr; A0 *http.Handler }"þ`type.*struct { F uintptr; A0 *net/http.Handler }  –8‚Ï6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."*struct { F uintptr; A0 *http.Handler }"prgo.weak.type.**struct { F uintptr; A0 *net/http.Handler }€"runtime.zerovalue^type.struct { F uintptr; A0 *net/http.Handler }þ8go.string."*[8]interface {}"PB*[8]interface {} 8go.string."*[8]interface {}"þ*type.*[8]interface {}  ‰aK6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*[8]interface {}"p +__.PKGDEF 0 0 0 644 69882 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package mux + import runtime "runtime" + import url "net/url" + import errors "errors" + import http "net/http" + import strconv "strconv" + import strings "strings" + import fmt "fmt" + import context "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" + import regexp "regexp" + import bytes "bytes" + import path "path" + import io "io" // indirect + type @"io".Writer interface { Write(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"net/http".keyValues struct { @"net/http".key string; @"net/http".values []string } + type @"net/http".headerSorter struct { @"net/http".kvs []@"net/http".keyValues } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Len () (? int) { return len(@"net/http".s·2.@"net/http".kvs) } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Less (@"net/http".i·3 int, @"net/http".j·4 int) (? 
bool) { return @"net/http".s·2.@"net/http".kvs[@"net/http".i·3].@"net/http".key < @"net/http".s·2.@"net/http".kvs[@"net/http".j·4].@"net/http".key } + func (@"net/http".s·1 *@"net/http".headerSorter "esc:0x0") Swap (@"net/http".i·2 int, @"net/http".j·3 int) { @"net/http".s·1.@"net/http".kvs[@"net/http".i·2], @"net/http".s·1.@"net/http".kvs[@"net/http".j·3] = @"net/http".s·1.@"net/http".kvs[@"net/http".j·3], @"net/http".s·1.@"net/http".kvs[@"net/http".i·2] } + type @"net/http".Header map[string][]string + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Add (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Del (@"net/http".key·2 string "esc:0x0") + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Get (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Set (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") WriteSubset (@"net/http".w·3 @"io".Writer, @"net/http".exclude·4 map[string]bool "esc:0x0") (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".clone () (? @"net/http".Header) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".get (@"net/http".key·3 string "esc:0x0") (? string) { var @"net/http".v·4 []string; ; @"net/http".v·4 = @"net/http".h·2[@"net/http".key·3]; if len(@"net/http".v·4) > 0x0 { return @"net/http".v·4[0x0] }; return "" } + func (@"net/http".h·3 @"net/http".Header "esc:0x0") @"net/http".sortedKeyValues (@"net/http".exclude·4 map[string]bool "esc:0x0") (@"net/http".kvs·1 []@"net/http".keyValues, @"net/http".hs·2 *@"net/http".headerSorter) + type @"net/http".ResponseWriter interface { Header() (? @"net/http".Header); Write(? []byte) (? int, ? error); WriteHeader(? 
int) } + type @"net/url".Userinfo struct { @"net/url".username string; @"net/url".password string; @"net/url".passwordSet bool } + func (@"net/url".u·3 *@"net/url".Userinfo "esc:0x1") Password () (? string, ? bool) { if @"net/url".u·3.@"net/url".passwordSet { return @"net/url".u·3.@"net/url".password, true }; return "", false } + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") String () (? string) + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") Username () (? string) { return @"net/url".u·2.@"net/url".username } + type @"net/url".Values map[string][]string + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Add (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = append(@"net/url".v·1[@"net/url".key·2], @"net/url".value·3) } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Del (@"net/url".key·2 string "esc:0x0") { delete(@"net/url".v·1, @"net/url".key·2) } + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Encode () (? string) + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Get (@"net/url".key·3 string "esc:0x0") (? string) { if @"net/url".v·2 == nil { return "" }; var @"net/url".vs·4 []string; ; var @"net/url".ok·5 bool; ; @"net/url".vs·4, @"net/url".ok·5 = @"net/url".v·2[@"net/url".key·3]; if !@"net/url".ok·5 || len(@"net/url".vs·4) == 0x0 { return "" }; return @"net/url".vs·4[0x0] } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Set (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = ([]string{ 0x0:@"net/url".value·3 }) } + type @"net/url".URL struct { Scheme string; Opaque string; User *@"net/url".Userinfo; Host string; Path string; RawQuery string; Fragment string } + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") IsAbs () (? bool) { return @"net/url".u·2.Scheme != "" } + func (@"net/url".u·3 *@"net/url".URL "esc:0x2") Parse (@"net/url".ref·4 string) (? *@"net/url".URL, ? error) + func (@"net/url".u·2 *@"net/url".URL) Query () (? 
@"net/url".Values) + func (@"net/url".u·2 *@"net/url".URL "esc:0x1") RequestURI () (? string) + func (@"net/url".u·2 *@"net/url".URL "esc:0x2") ResolveReference (@"net/url".ref·3 *@"net/url".URL "esc:0x2") (? *@"net/url".URL) + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") String () (? string) + type @"io".ReadCloser interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error) } + import multipart "mime/multipart" // indirect + import textproto "net/textproto" // indirect + type @"net/textproto".MIMEHeader map[string][]string + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Add (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Del (@"net/textproto".key·2 string "esc:0x0") + func (@"net/textproto".h·2 @"net/textproto".MIMEHeader "esc:0x0") Get (@"net/textproto".key·3 string "esc:0x0") (? string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Set (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + type @"mime/multipart".File interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error); ReadAt(@"io".p []byte, @"io".off int64) (@"io".n int, @"io".err error); Seek(@"io".offset int64, @"io".whence int) (? int64, ? error) } + type @"mime/multipart".FileHeader struct { Filename string; Header @"net/textproto".MIMEHeader; @"mime/multipart".content []byte; @"mime/multipart".tmpfile string } + func (@"mime/multipart".fh·3 *@"mime/multipart".FileHeader) Open () (? @"mime/multipart".File, ? error) + type @"mime/multipart".Form struct { Value map[string][]string; File map[string][]*@"mime/multipart".FileHeader } + func (@"mime/multipart".f·2 *@"mime/multipart".Form "esc:0x0") RemoveAll () (? 
error) + import tls "crypto/tls" // indirect + import x509 "crypto/x509" // indirect + type @"crypto/x509".SignatureAlgorithm int + type @"crypto/x509".PublicKeyAlgorithm int + import big "math/big" // indirect + type @"math/big".Word uintptr + type @"math/big".divisor struct { @"math/big".bbb @"math/big".nat; @"math/big".nbits int; @"math/big".ndigits int } + import rand "math/rand" // indirect + type @"math/rand".Source interface { Int63() (? int64); Seed(@"math/rand".seed int64) } + type @"math/rand".Rand struct { @"math/rand".src @"math/rand".Source } + func (@"math/rand".r·2 *@"math/rand".Rand) ExpFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Float32 () (? float32) + func (@"math/rand".r·2 *@"math/rand".Rand) Float64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int () (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31 () (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31n (@"math/rand".n·3 int32) (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63 () (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63n (@"math/rand".n·3 int64) (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Intn (@"math/rand".n·3 int) (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) NormFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Perm (@"math/rand".n·3 int) (? []int) + func (@"math/rand".r·1 *@"math/rand".Rand) Seed (@"math/rand".seed·2 int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Uint32 () (? uint32) + type @"io".RuneScanner interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error); UnreadRune() (? error) } + type @"math/big".nat []@"math/big".Word + func (@"math/big".z·2 @"math/big".nat) @"math/big".add (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".and (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".andNot (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bit (@"math/big".i·3 uint) (? uint) { var @"math/big".j·4 int; ; @"math/big".j·4 = int(@"math/big".i·3 / 0x40); if @"math/big".j·4 >= len(@"math/big".z·2) { return 0x0 }; return uint(@"math/big".z·2[@"math/big".j·4] >> (@"math/big".i·3 % 0x40) & @"math/big".Word(0x1)) } + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".bitLen () (? int) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bytes (@"math/big".buf·3 []byte "esc:0x0") (@"math/big".i·1 int) + func (@"math/big".z·1 @"math/big".nat "esc:0x0") @"math/big".clear () + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".cmp (@"math/big".y·3 @"math/big".nat "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".q·1 @"math/big".nat) @"math/big".convertWords (@"math/big".s·2 []byte "esc:0x0", @"math/big".charset·3 string "esc:0x0", @"math/big".b·4 @"math/big".Word, @"math/big".ndigits·5 int, @"math/big".bb·6 @"math/big".Word, @"math/big".table·7 []@"math/big".divisor "esc:0x0") + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".decimalString () (? 
string) + func (@"math/big".z·3 @"math/big".nat) @"math/big".div (@"math/big".z2·4 @"math/big".nat, @"math/big".u·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat "esc:0x2") @"math/big".divLarge (@"math/big".u·4 @"math/big".nat, @"math/big".uIn·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat) @"math/big".divW (@"math/big".x·4 @"math/big".nat, @"math/big".y·5 @"math/big".Word) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNN (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNNWindowed (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expWW (@"math/big".x·3 @"math/big".Word, @"math/big".y·4 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".make (@"math/big".n·3 int) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat) @"math/big".modW (@"math/big".d·3 @"math/big".Word) (@"math/big".r·1 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mul (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulAddWW (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".Word, @"math/big".r·5 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulRange (@"math/big".a·3 uint64, @"math/big".b·4 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".norm () (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".or (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".n·2 @"math/big".nat) @"math/big".probablyPrime (@"math/big".reps·3 int) (? bool) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".random (@"math/big".rand·3 *@"math/rand".Rand, @"math/big".limit·4 @"math/big".nat "esc:0x0", @"math/big".n·5 int) (? @"math/big".nat) + func (@"math/big".z·4 @"math/big".nat) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? @"math/big".nat, ? int, ? error) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".set (@"math/big".x·3 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBit (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".i·4 uint, @"math/big".b·5 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBytes (@"math/big".buf·3 []byte "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setUint64 (@"math/big".x·3 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setWord (@"math/big".x·3 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shl (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shr (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".string (@"math/big".charset·3 string "esc:0x0") (? string) + func (@"math/big".z·2 @"math/big".nat) @"math/big".sub (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".trailingZeroBits () (? 
uint) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".xor (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + type @"fmt".State interface { Flag(@"fmt".c int) (? bool); Precision() (@"fmt".prec int, @"fmt".ok bool); Width() (@"fmt".wid int, @"fmt".ok bool); Write(@"fmt".b []byte) (@"fmt".ret int, @"fmt".err error) } + type @"fmt".ScanState interface { Read(@"fmt".buf []byte) (@"fmt".n int, @"fmt".err error); ReadRune() (@"fmt".r rune, @"fmt".size int, @"fmt".err error); SkipSpace(); Token(@"fmt".skipSpace bool, @"fmt".f func(? rune) (? bool)) (@"fmt".token []byte, @"fmt".err error); UnreadRune() (? error); Width() (@"fmt".wid int, @"fmt".ok bool) } + type @"math/big".Int struct { @"math/big".neg bool; @"math/big".abs @"math/big".nat } + func (@"math/big".z·2 *@"math/big".Int) Abs (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Add (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) And (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) AndNot (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Binomial (@"math/big".n·3 int64, @"math/big".k·4 int64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) Bit (@"math/big".i·3 int) (? uint) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") BitLen () (? int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x1") Bits () (? []@"math/big".Word) { return @"math/big".x·2.@"math/big".abs } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Bytes () (? 
[]byte) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Cmp (@"math/big".y·3 *@"math/big".Int "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".z·2 *@"math/big".Int) Div (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) DivMod (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".m·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Exp (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int "esc:0x0", @"math/big".m·5 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·1 *@"math/big".Int "esc:0x0") Format (@"math/big".s·2 @"fmt".State, @"math/big".ch·3 rune) + func (@"math/big".z·2 *@"math/big".Int) GCD (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int, @"math/big".a·5 *@"math/big".Int, @"math/big".b·6 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) GobDecode (@"math/big".buf·3 []byte "esc:0x0") (? error) + func (@"math/big".x·3 *@"math/big".Int "esc:0x0") GobEncode () (? []byte, ? error) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Int64 () (? int64) + func (@"math/big".z·2 *@"math/big".Int) Lsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalText () (@"math/big".text·1 []byte, @"math/big".err·2 error) + func (@"math/big".z·2 *@"math/big".Int) Mod (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) ModInverse (@"math/big".g·3 *@"math/big".Int, @"math/big".n·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Mul (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? 
*@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) MulRange (@"math/big".a·3 int64, @"math/big".b·4 int64) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Neg (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Not (@"math/big".x·3 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Or (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) ProbablyPrime (@"math/big".n·3 int) (? bool) + func (@"math/big".z·2 *@"math/big".Int) Quo (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) QuoRem (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".r·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rand (@"math/big".rnd·3 *@"math/rand".Rand, @"math/big".n·4 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rem (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Scan (@"math/big".s·3 @"fmt".ScanState, @"math/big".ch·4 rune) (? error) + func (@"math/big".z·2 *@"math/big".Int) Set (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBit (@"math/big".x·3 *@"math/big".Int, @"math/big".i·4 int, @"math/big".b·5 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int "esc:0x2") SetBits (@"math/big".abs·3 []@"math/big".Word) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBytes (@"math/big".buf·3 []byte "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetInt64 (@"math/big".x·3 int64) (? 
*@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) SetString (@"math/big".s·4 string, @"math/big".base·5 int) (? *@"math/big".Int, ? bool) + func (@"math/big".z·2 *@"math/big".Int) SetUint64 (@"math/big".x·3 uint64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Sign () (? int) { if len(@"math/big".x·2.@"math/big".abs) == 0x0 { return 0x0 }; if @"math/big".x·2.@"math/big".neg { return -0x1 }; return 0x1 } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") String () (? string) + func (@"math/big".z·2 *@"math/big".Int) Sub (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Uint64 () (? uint64) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalJSON (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalText (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) Xor (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) @"math/big".binaryGCD (@"math/big".a·3 *@"math/big".Int, @"math/big".b·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·4 *@"math/big".Int) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? *@"math/big".Int, ? int, ? error) + import pkix "crypto/x509/pkix" // indirect + import asn1 "encoding/asn1" // indirect + type @"encoding/asn1".ObjectIdentifier []int + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") Equal (@"encoding/asn1".other·3 @"encoding/asn1".ObjectIdentifier "esc:0x0") (? bool) + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") String () (? 
string) + type @"crypto/x509/pkix".AttributeTypeAndValue struct { Type @"encoding/asn1".ObjectIdentifier; Value interface {} } + type @"crypto/x509/pkix".RelativeDistinguishedNameSET []@"crypto/x509/pkix".AttributeTypeAndValue + type @"crypto/x509/pkix".RDNSequence []@"crypto/x509/pkix".RelativeDistinguishedNameSET + type @"crypto/x509/pkix".Name struct { Country []string; Organization []string; OrganizationalUnit []string; Locality []string; Province []string; StreetAddress []string; PostalCode []string; SerialNumber string; CommonName string; Names []@"crypto/x509/pkix".AttributeTypeAndValue } + func (@"crypto/x509/pkix".n·1 *@"crypto/x509/pkix".Name) FillFromRDNSequence (@"crypto/x509/pkix".rdns·2 *@"crypto/x509/pkix".RDNSequence "esc:0x0") + func (@"crypto/x509/pkix".n·2 @"crypto/x509/pkix".Name) ToRDNSequence () (@"crypto/x509/pkix".ret·1 @"crypto/x509/pkix".RDNSequence) + import time "time" // indirect + type @"time".zone struct { @"time".name string; @"time".offset int; @"time".isDST bool } + type @"time".zoneTrans struct { @"time".when int64; @"time".index uint8; @"time".isstd bool; @"time".isutc bool } + type @"time".Location struct { @"time".name string; @"time".zone []@"time".zone; @"time".tx []@"time".zoneTrans; @"time".cacheStart int64; @"time".cacheEnd int64; @"time".cacheZone *@"time".zone } + func (@"time".l·2 *@"time".Location "esc:0x0") String () (? string) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".firstZoneUsed () (? bool) + func (@"time".l·2 *@"time".Location "esc:0x2") @"time".get () (? *@"time".Location) + func (@"time".l·6 *@"time".Location "esc:0x1") @"time".lookup (@"time".sec·7 int64) (@"time".name·1 string, @"time".offset·2 int, @"time".isDST·3 bool, @"time".start·4 int64, @"time".end·5 int64) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".lookupFirstZone () (? 
int) + func (@"time".l·4 *@"time".Location "esc:0x0") @"time".lookupName (@"time".name·5 string "esc:0x0", @"time".unix·6 int64) (@"time".offset·1 int, @"time".isDST·2 bool, @"time".ok·3 bool) + type @"time".Duration int64 + func (@"time".d·2 @"time".Duration) Hours () (? float64) { var @"time".hour·3 @"time".Duration; ; @"time".hour·3 = @"time".d·2 / @"time".Duration(0x34630B8A000); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x34630B8A000); return float64(@"time".hour·3) + float64(@"time".nsec·4) * 0x9C5FFF26ED75Fp-93 } + func (@"time".d·2 @"time".Duration) Minutes () (? float64) { var @"time".min·3 @"time".Duration; ; @"time".min·3 = @"time".d·2 / @"time".Duration(0xDF8475800); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0xDF8475800); return float64(@"time".min·3) + float64(@"time".nsec·4) * 0x9299FF347E9E9p-87 } + func (@"time".d·2 @"time".Duration) Nanoseconds () (? int64) { return int64(@"time".d·2) } + func (@"time".d·2 @"time".Duration) Seconds () (? float64) { var @"time".sec·3 @"time".Duration; ; @"time".sec·3 = @"time".d·2 / @"time".Duration(0x3B9ACA00); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x3B9ACA00); return float64(@"time".sec·3) + float64(@"time".nsec·4) * 0x112E0BE826D695p-82 } + func (@"time".d·2 @"time".Duration) String () (? string) + type @"time".Month int + func (@"time".m·2 @"time".Month) String () (? string) { return @"time".months[@"time".m·2 - @"time".Month(0x1)] } + type @"time".Weekday int + func (@"time".d·2 @"time".Weekday) String () (? string) { return @"time".days[@"time".d·2] } + type @"time".Time struct { @"time".sec int64; @"time".nsec int32; @"time".loc *@"time".Location } + func (@"time".t·2 @"time".Time "esc:0x2") Add (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") AddDate (@"time".years·3 int, @"time".months·4 int, @"time".days·5 int) (? 
@"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") After (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec > @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec > @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Before (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec < @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec < @"time".u·3.@"time".nsec } + func (@"time".t·4 @"time".Time "esc:0x0") Clock () (@"time".hour·1 int, @"time".min·2 int, @"time".sec·3 int) + func (@"time".t·4 @"time".Time "esc:0x0") Date () (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int) + func (@"time".t·2 @"time".Time "esc:0x0") Day () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Equal (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec == @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Format (@"time".layout·3 string "esc:0x0") (? string) + func (@"time".t·2 *@"time".Time "esc:0x0") GobDecode (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·3 @"time".Time "esc:0x0") GobEncode () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Hour () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") ISOWeek () (@"time".year·1 int, @"time".week·2 int) + func (@"time".t·2 @"time".Time "esc:0x2") In (@"time".loc·3 *@"time".Location "esc:0x2") (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") IsZero () (? bool) { return @"time".t·2.@"time".sec == 0x0 && @"time".t·2.@"time".nsec == 0x0 } + func (@"time".t·2 @"time".Time "esc:0x2") Local () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".Local; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x2") Location () (? 
*@"time".Location) { var @"time".l·3 *@"time".Location; ; @"time".l·3 = @"time".t·2.@"time".loc; if @"time".l·3 == nil { @"time".l·3 = @"time".UTC }; return @"time".l·3 } + func (@"time".t·3 @"time".Time "esc:0x0") MarshalBinary () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalText () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Minute () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Month () (? @"time".Month) + func (@"time".t·2 @"time".Time "esc:0x0") Nanosecond () (? int) { return int(@"time".t·2.@"time".nsec) } + func (@"time".t·2 @"time".Time "esc:0x2") Round (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") Second () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") String () (? string) + func (@"time".t·2 @"time".Time "esc:0x0") Sub (@"time".u·3 @"time".Time "esc:0x0") (? @"time".Duration) + func (@"time".t·2 @"time".Time "esc:0x2") Truncate (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") UTC () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".UTC; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x0") Unix () (? int64) { return @"time".t·2.@"time".sec + -0xE7791F700 } + func (@"time".t·2 @"time".Time "esc:0x0") UnixNano () (? int64) { return (@"time".t·2.@"time".sec + -0xE7791F700) * 0x3B9ACA00 + int64(@"time".t·2.@"time".nsec) } + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalBinary (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalJSON (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalText (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 @"time".Time "esc:0x0") Weekday () (? @"time".Weekday) + func (@"time".t·2 @"time".Time "esc:0x0") Year () (? 
int) + func (@"time".t·2 @"time".Time "esc:0x0") YearDay () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") Zone () (@"time".name·1 string, @"time".offset·2 int) + func (@"time".t·2 @"time".Time "esc:0x0") @"time".abs () (? uint64) + func (@"time".t·5 @"time".Time "esc:0x0") @"time".date (@"time".full·6 bool) (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int, @"time".yday·4 int) + func (@"time".t·4 @"time".Time "esc:0x1") @"time".locabs () (@"time".name·1 string, @"time".offset·2 int, @"time".abs·3 uint64) + type @"crypto/x509".KeyUsage int + type @"crypto/x509/pkix".Extension struct { Id @"encoding/asn1".ObjectIdentifier; Critical bool "asn1:\"optional\""; Value []byte } + type @"crypto/x509".ExtKeyUsage int + import net "net" // indirect + type @"net".IPMask []byte + func (@"net".m·3 @"net".IPMask "esc:0x0") Size () (@"net".ones·1 int, @"net".bits·2 int) + func (@"net".m·2 @"net".IPMask "esc:0x0") String () (? string) + type @"net".IP []byte + func (@"net".ip·2 @"net".IP "esc:0x0") DefaultMask () (? @"net".IPMask) + func (@"net".ip·2 @"net".IP "esc:0x0") Equal (@"net".x·3 @"net".IP "esc:0x0") (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsGlobalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsInterfaceLocalMulticast () (? bool) { return len(@"net".ip·2) == 0x10 && @"net".ip·2[0x0] == byte(0xFF) && @"net".ip·2[0x1] & byte(0xF) == byte(0x1) } + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLoopback () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsUnspecified () (? bool) + func (@"net".ip·3 @"net".IP "esc:0x0") MarshalText () (? []byte, ? error) + func (@"net".ip·2 @"net".IP "esc:0x0") Mask (@"net".mask·3 @"net".IPMask "esc:0x0") (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x0") String () (? 
string) + func (@"net".ip·2 @"net".IP "esc:0x2") To16 () (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x2") To4 () (? @"net".IP) + func (@"net".ip·2 *@"net".IP "esc:0x0") UnmarshalText (@"net".text·3 []byte "esc:0x0") (? error) + type @"encoding/asn1".RawContent []byte + type @"encoding/asn1".RawValue struct { Class int; Tag int; IsCompound bool; Bytes []byte; FullBytes []byte } + type @"crypto/x509/pkix".AlgorithmIdentifier struct { Algorithm @"encoding/asn1".ObjectIdentifier; Parameters @"encoding/asn1".RawValue "asn1:\"optional\"" } + type @"crypto/x509/pkix".RevokedCertificate struct { SerialNumber *@"math/big".Int; RevocationTime @"time".Time; Extensions []@"crypto/x509/pkix".Extension "asn1:\"optional\"" } + type @"crypto/x509/pkix".TBSCertificateList struct { Raw @"encoding/asn1".RawContent; Version int "asn1:\"optional,default:2\""; Signature @"crypto/x509/pkix".AlgorithmIdentifier; Issuer @"crypto/x509/pkix".RDNSequence; ThisUpdate @"time".Time; NextUpdate @"time".Time "asn1:\"optional\""; RevokedCertificates []@"crypto/x509/pkix".RevokedCertificate "asn1:\"optional\""; Extensions []@"crypto/x509/pkix".Extension "asn1:\"tag:0,optional,explicit\"" } + type @"encoding/asn1".BitString struct { Bytes []byte; BitLength int } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x0") At (@"encoding/asn1".i·3 int) (? int) { if @"encoding/asn1".i·3 < 0x0 || @"encoding/asn1".i·3 >= @"encoding/asn1".b·2.BitLength { return 0x0 }; var @"encoding/asn1".x·4 int; ; @"encoding/asn1".x·4 = @"encoding/asn1".i·3 / 0x8; var @"encoding/asn1".y·5 uint; ; @"encoding/asn1".y·5 = 0x7 - uint(@"encoding/asn1".i·3 % 0x8); return int(@"encoding/asn1".b·2.Bytes[@"encoding/asn1".x·4] >> @"encoding/asn1".y·5) & 0x1 } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x2") RightAlign () (? 
[]byte) + type @"crypto/x509/pkix".CertificateList struct { TBSCertList @"crypto/x509/pkix".TBSCertificateList; SignatureAlgorithm @"crypto/x509/pkix".AlgorithmIdentifier; SignatureValue @"encoding/asn1".BitString } + func (@"crypto/x509/pkix".certList·2 *@"crypto/x509/pkix".CertificateList "esc:0x0") HasExpired (@"crypto/x509/pkix".now·3 @"time".Time "esc:0x0") (? bool) + type @"io".Reader interface { Read(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"crypto/x509".CertPool struct { @"crypto/x509".bySubjectKeyId map[string][]int; @"crypto/x509".byName map[string][]int; @"crypto/x509".certs []*@"crypto/x509".Certificate } + func (@"crypto/x509".s·1 *@"crypto/x509".CertPool) AddCert (@"crypto/x509".cert·2 *@"crypto/x509".Certificate) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool) AppendCertsFromPEM (@"crypto/x509".pemCerts·3 []byte) (@"crypto/x509".ok·1 bool) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool "esc:0x0") Subjects () (@"crypto/x509".res·1 [][]byte) + func (@"crypto/x509".s·4 *@"crypto/x509".CertPool "esc:0x0") @"crypto/x509".findVerifiedParents (@"crypto/x509".cert·5 *@"crypto/x509".Certificate) (@"crypto/x509".parents·1 []int, @"crypto/x509".errCert·2 *@"crypto/x509".Certificate, @"crypto/x509".err·3 error) + type @"crypto/x509".VerifyOptions struct { DNSName string; Intermediates *@"crypto/x509".CertPool; Roots *@"crypto/x509".CertPool; CurrentTime @"time".Time; KeyUsages []@"crypto/x509".ExtKeyUsage } + type @"crypto/x509".Certificate struct { Raw []byte; RawTBSCertificate []byte; RawSubjectPublicKeyInfo []byte; RawSubject []byte; RawIssuer []byte; Signature []byte; SignatureAlgorithm @"crypto/x509".SignatureAlgorithm; PublicKeyAlgorithm @"crypto/x509".PublicKeyAlgorithm; PublicKey interface {}; Version int; SerialNumber *@"math/big".Int; Issuer @"crypto/x509/pkix".Name; Subject @"crypto/x509/pkix".Name; NotBefore @"time".Time; NotAfter @"time".Time; KeyUsage @"crypto/x509".KeyUsage; Extensions []@"crypto/x509/pkix".Extension; 
ExtraExtensions []@"crypto/x509/pkix".Extension; ExtKeyUsage []@"crypto/x509".ExtKeyUsage; UnknownExtKeyUsage []@"encoding/asn1".ObjectIdentifier; BasicConstraintsValid bool; IsCA bool; MaxPathLen int; MaxPathLenZero bool; SubjectKeyId []byte; AuthorityKeyId []byte; OCSPServer []string; IssuingCertificateURL []string; DNSNames []string; EmailAddresses []string; IPAddresses []@"net".IP; PermittedDNSDomainsCritical bool; PermittedDNSDomains []string; CRLDistributionPoints []string; PolicyIdentifiers []@"encoding/asn1".ObjectIdentifier } + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckCRLSignature (@"crypto/x509".crl·3 *@"crypto/x509/pkix".CertificateList) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignature (@"crypto/x509".algo·3 @"crypto/x509".SignatureAlgorithm, @"crypto/x509".signed·4 []byte, @"crypto/x509".signature·5 []byte) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignatureFrom (@"crypto/x509".parent·3 *@"crypto/x509".Certificate) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) CreateCRL (@"crypto/x509".rand·4 @"io".Reader, @"crypto/x509".priv·5 interface {}, @"crypto/x509".revokedCerts·6 []@"crypto/x509/pkix".RevokedCertificate, @"crypto/x509".now·7 @"time".Time, @"crypto/x509".expiry·8 @"time".Time) (@"crypto/x509".crlBytes·1 []byte, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x0") Equal (@"crypto/x509".other·3 *@"crypto/x509".Certificate "esc:0x0") (? bool) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) Verify (@"crypto/x509".opts·4 @"crypto/x509".VerifyOptions "esc:0x4") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") VerifyHostname (@"crypto/x509".h·3 string "esc:0x2") (? 
error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) @"crypto/x509".buildChains (@"crypto/x509".cache·4 map[int][][]*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".currentChain·5 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·6 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") @"crypto/x509".isValid (@"crypto/x509".certType·3 int, @"crypto/x509".currentChain·4 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·5 *@"crypto/x509".VerifyOptions "esc:0x0") (? error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate "esc:0x0") @"crypto/x509".systemVerify (@"crypto/x509".opts·4 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) { return nil, nil } + type @"crypto/tls".ConnectionState struct { Version uint16; HandshakeComplete bool; DidResume bool; CipherSuite uint16; NegotiatedProtocol string; NegotiatedProtocolIsMutual bool; ServerName string; PeerCertificates []*@"crypto/x509".Certificate; VerifiedChains [][]*@"crypto/x509".Certificate; TLSUnique []byte } + type @"net/http".Cookie struct { Name string; Value string; Path string; Domain string; Expires @"time".Time; RawExpires string; MaxAge int; Secure bool; HttpOnly bool; Raw string; Unparsed []string } + func (@"net/http".c·2 *@"net/http".Cookie) String () (? string) + import bufio "bufio" // indirect + type @"bufio".Reader struct { @"bufio".buf []byte; @"bufio".rd @"io".Reader; @"bufio".r int; @"bufio".w int; @"bufio".err error; @"bufio".lastByte int; @"bufio".lastRuneSize int } + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") Buffered () (? int) { return @"bufio".b·2.@"bufio".w - @"bufio".b·2.@"bufio".r } + func (@"bufio".b·3 *@"bufio".Reader) Peek (@"bufio".n·4 int) (? []byte, ? 
error) + func (@"bufio".b·3 *@"bufio".Reader) Read (@"bufio".p·4 []byte) (@"bufio".n·1 int, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadByte () (@"bufio".c·1 byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadBytes (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadLine () (@"bufio".line·1 []byte, @"bufio".isPrefix·2 bool, @"bufio".err·3 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadRune () (@"bufio".r·1 rune, @"bufio".size·2 int, @"bufio".err·3 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadSlice (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadString (@"bufio".delim·4 byte) (@"bufio".line·1 string, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) Reset (@"bufio".r·2 @"io".Reader) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadByte () (? error) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadRune () (? error) { if @"bufio".b·2.@"bufio".lastRuneSize < 0x0 || @"bufio".b·2.@"bufio".r < @"bufio".b·2.@"bufio".lastRuneSize { return @"bufio".ErrInvalidUnreadRune }; @"bufio".b·2.@"bufio".r -= @"bufio".b·2.@"bufio".lastRuneSize; @"bufio".b·2.@"bufio".lastByte = -0x1; @"bufio".b·2.@"bufio".lastRuneSize = -0x1; return nil } + func (@"bufio".b·3 *@"bufio".Reader) WriteTo (@"bufio".w·4 @"io".Writer) (@"bufio".n·1 int64, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) @"bufio".fill () + func (@"bufio".b·2 *@"bufio".Reader "esc:0x1") @"bufio".readErr () (? 
error) { var @"bufio".err·3 error; ; @"bufio".err·3 = @"bufio".b·2.@"bufio".err; @"bufio".b·2.@"bufio".err = nil; return @"bufio".err·3 } + func (@"bufio".b·1 *@"bufio".Reader "esc:0x0") @"bufio".reset (@"bufio".buf·2 []byte, @"bufio".r·3 @"io".Reader) { *@"bufio".b·1 = (@"bufio".Reader{ @"bufio".buf:@"bufio".buf·2, @"bufio".rd:@"bufio".r·3, @"bufio".lastByte:-0x1, @"bufio".lastRuneSize:-0x1 }) } + func (@"bufio".b·3 *@"bufio".Reader) @"bufio".writeBuf (@"bufio".w·4 @"io".Writer) (? int64, ? error) + type @"bytes".readOp int + type @"bytes".Buffer struct { @"bytes".buf []byte; @"bytes".off int; @"bytes".runeBytes [4]byte; @"bytes".bootstrap [64]byte; @"bytes".lastRead @"bytes".readOp } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Bytes () (? []byte) { return @"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:] } + func (@"bytes".b·1 *@"bytes".Buffer) Grow (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") Len () (? int) { return len(@"bytes".b·2.@"bytes".buf) - @"bytes".b·2.@"bytes".off } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Next (@"bytes".n·3 int) (? []byte) + func (@"bytes".b·3 *@"bytes".Buffer) Read (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadByte () (@"bytes".c·1 byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadBytes (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadFrom (@"bytes".r·4 @"io".Reader) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·4 *@"bytes".Buffer) ReadRune () (@"bytes".r·1 rune, @"bytes".size·2 int, @"bytes".err·3 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadString (@"bytes".delim·4 byte) (@"bytes".line·1 string, @"bytes".err·2 error) + func (@"bytes".b·1 *@"bytes".Buffer) Reset () + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") String () (? 
string) { if @"bytes".b·2 == nil { return "" }; return string(@"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:]) } + func (@"bytes".b·1 *@"bytes".Buffer) Truncate (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadByte () (? error) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadRune () (? error) + func (@"bytes".b·3 *@"bytes".Buffer) Write (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) WriteByte (@"bytes".c·3 byte) (? error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteRune (@"bytes".r·4 rune) (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteString (@"bytes".s·4 string "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteTo (@"bytes".w·4 @"io".Writer) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) @"bytes".grow (@"bytes".n·3 int) (? int) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x1") @"bytes".readSlice (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + type @"mime/multipart".Part struct { Header @"net/textproto".MIMEHeader; @"mime/multipart".buffer *@"bytes".Buffer; @"mime/multipart".mr *@"mime/multipart".Reader; @"mime/multipart".bytesRead int; @"mime/multipart".disposition string; @"mime/multipart".dispositionParams map[string]string; @"mime/multipart".r @"io".Reader } + func (@"mime/multipart".p·2 *@"mime/multipart".Part) Close () (? error) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FileName () (? string) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FormName () (? 
string) + func (@"mime/multipart".p·3 *@"mime/multipart".Part) Read (@"mime/multipart".d·4 []byte) (@"mime/multipart".n·1 int, @"mime/multipart".err·2 error) + func (@"mime/multipart".p·1 *@"mime/multipart".Part "esc:0x0") @"mime/multipart".parseContentDisposition () + func (@"mime/multipart".bp·2 *@"mime/multipart".Part) @"mime/multipart".populateHeaders () (? error) + type @"mime/multipart".Reader struct { @"mime/multipart".bufReader *@"bufio".Reader; @"mime/multipart".currentPart *@"mime/multipart".Part; @"mime/multipart".partsRead int; @"mime/multipart".nl []byte; @"mime/multipart".nlDashBoundary []byte; @"mime/multipart".dashBoundaryDash []byte; @"mime/multipart".dashBoundary []byte } + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) NextPart () (? *@"mime/multipart".Part, ? error) + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) ReadForm (@"mime/multipart".maxMemory·4 int64) (@"mime/multipart".f·1 *@"mime/multipart".Form, @"mime/multipart".err·2 error) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader) @"mime/multipart".isBoundaryDelimiterLine (@"mime/multipart".line·3 []byte "esc:0x0") (@"mime/multipart".ret·1 bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".isFinalBoundary (@"mime/multipart".line·3 []byte "esc:0x0") (? bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".peekBufferIsEmptyPart (@"mime/multipart".peek·3 []byte "esc:0x0") (? 
bool) + type @"net/http".Request struct { Method string; URL *@"net/url".URL; Proto string; ProtoMajor int; ProtoMinor int; Header @"net/http".Header; Body @"io".ReadCloser; ContentLength int64; TransferEncoding []string; Close bool; Host string; Form @"net/url".Values; PostForm @"net/url".Values; MultipartForm *@"mime/multipart".Form; Trailer @"net/http".Header; RemoteAddr string; RequestURI string; TLS *@"crypto/tls".ConnectionState } + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") AddCookie (@"net/http".c·2 *@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request "esc:0x0") BasicAuth () (@"net/http".username·1 string, @"net/http".password·2 string, @"net/http".ok·3 bool) + func (@"net/http".r·3 *@"net/http".Request "esc:0x0") Cookie (@"net/http".name·4 string "esc:0x0") (? *@"net/http".Cookie, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Cookies () (? []*@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request) FormFile (@"net/http".key·5 string "esc:0x0") (? @"mime/multipart".File, ? *@"mime/multipart".FileHeader, ? error) + func (@"net/http".r·2 *@"net/http".Request) FormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·3 *@"net/http".Request) MultipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request) ParseForm () (? error) + func (@"net/http".r·2 *@"net/http".Request) ParseMultipartForm (@"net/http".maxMemory·3 int64) (? error) + func (@"net/http".r·2 *@"net/http".Request) PostFormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") ProtoAtLeast (@"net/http".major·3 int, @"net/http".minor·4 int) (? bool) { return @"net/http".r·2.ProtoMajor > @"net/http".major·3 || @"net/http".r·2.ProtoMajor == @"net/http".major·3 && @"net/http".r·2.ProtoMinor >= @"net/http".minor·4 } + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Referer () (? 
string) + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") SetBasicAuth (@"net/http".username·2 string "esc:0x0", @"net/http".password·3 string "esc:0x0") + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") UserAgent () (? string) + func (@"net/http".r·2 *@"net/http".Request) Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·2 *@"net/http".Request) WriteProxy (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·1 *@"net/http".Request) @"net/http".closeBody () + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".expectsContinue () (? bool) + func (@"net/http".r·3 *@"net/http".Request) @"net/http".multipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsClose () (? bool) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsHttp10KeepAlive () (? bool) + func (@"net/http".req·2 *@"net/http".Request) @"net/http".write (@"net/http".w·3 @"io".Writer, @"net/http".usingProxy·4 bool, @"net/http".extraHeaders·5 @"net/http".Header "esc:0x0") (? error) + type @"net/http".Handler interface { ServeHTTP(? @"net/http".ResponseWriter, ? *@"net/http".Request) } + type @"".RouteMatch struct { Route *@"".Route; Handler @"net/http".Handler; Vars map[string]string } + type @"".matcher interface { Match(? *@"net/http".Request, ? *@"".RouteMatch) (? bool) } + import syntax "regexp/syntax" // indirect + type @"regexp/syntax".InstOp uint8 + func (@"regexp/syntax".i·2 @"regexp/syntax".InstOp) String () (? string) { if uint(@"regexp/syntax".i·2) >= uint(len(@"regexp/syntax".instOpNames)) { return "" }; return @"regexp/syntax".instOpNames[@"regexp/syntax".i·2] } + type @"regexp/syntax".Inst struct { Op @"regexp/syntax".InstOp; Out uint32; Arg uint32; Rune []rune } + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchEmptyWidth (@"regexp/syntax".before·3 rune, @"regexp/syntax".after·4 rune) (? 
bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRune (@"regexp/syntax".r·3 rune) (? bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRunePos (@"regexp/syntax".r·3 rune) (? int) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") String () (? string) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") @"regexp/syntax".op () (? @"regexp/syntax".InstOp) + type @"regexp/syntax".EmptyOp uint8 + type @"regexp/syntax".Prog struct { Inst []@"regexp/syntax".Inst; Start int; NumCap int } + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x0") Prefix () (@"regexp/syntax".prefix·1 string, @"regexp/syntax".complete·2 bool) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") StartCond () (? @"regexp/syntax".EmptyOp) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") String () (? string) + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x1") @"regexp/syntax".skipNop (@"regexp/syntax".pc·4 uint32) (? *@"regexp/syntax".Inst, ? uint32) + type @"regexp".onePassInst struct { ? @"regexp/syntax".Inst; Next []uint32 } + type @"regexp".onePassProg struct { Inst []@"regexp".onePassInst; Start int; NumCap int } + import sync "sync" // indirect + type @"sync".Mutex struct { @"sync".state int32; @"sync".sema uint32 } + func (@"sync".m·1 *@"sync".Mutex) Lock () + func (@"sync".m·1 *@"sync".Mutex) Unlock () + type @"regexp".thread struct { @"regexp".inst *@"regexp/syntax".Inst; @"regexp".cap []int } + type @"regexp".entry struct { @"regexp".pc uint32; @"regexp".t *@"regexp".thread } + type @"regexp".queue struct { @"regexp".sparse []uint32; @"regexp".dense []@"regexp".entry } + type @"regexp".inputBytes struct { @"regexp".str []byte } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? 
@"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputBytes "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + type @"regexp".inputString struct { @"regexp".str string } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputString "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + type @"io".RuneReader interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error) } + type @"regexp".inputReader struct { @"regexp".r @"io".RuneReader; @"regexp".atEOT bool; @"regexp".pos int } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) { return @"regexp/syntax".EmptyOp(0x0) } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) { return -0x1 } + func (@"regexp".i·3 *@"regexp".inputReader) @"regexp".step (@"regexp".pos·4 int) (? rune, ? 
int) + type @"regexp".input interface { @"regexp".canCheckPrefix() (? bool); @"regexp".context(@"regexp".pos int) (? @"regexp/syntax".EmptyOp); @"regexp".hasPrefix(@"regexp".re *@"regexp".Regexp) (? bool); @"regexp".index(@"regexp".re *@"regexp".Regexp, @"regexp".pos int) (? int); @"regexp".step(@"regexp".pos int) (@"regexp".r rune, @"regexp".width int) } + type @"regexp".machine struct { @"regexp".re *@"regexp".Regexp; @"regexp".p *@"regexp/syntax".Prog; @"regexp".op *@"regexp".onePassProg; @"regexp".q0 @"regexp".queue; @"regexp".q1 @"regexp".queue; @"regexp".pool []*@"regexp".thread; @"regexp".matched bool; @"regexp".matchcap []int; @"regexp".inputBytes @"regexp".inputBytes; @"regexp".inputString @"regexp".inputString; @"regexp".inputReader @"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".add (@"regexp".q·3 *@"regexp".queue, @"regexp".pc·4 uint32, @"regexp".pos·5 int, @"regexp".cap·6 []int "esc:0x0", @"regexp".cond·7 @"regexp/syntax".EmptyOp, @"regexp".t·8 *@"regexp".thread) (? *@"regexp".thread) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".alloc (@"regexp".i·3 *@"regexp/syntax".Inst) (? *@"regexp".thread) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".clear (@"regexp".q·2 *@"regexp".queue) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".free (@"regexp".t·2 *@"regexp".thread) { @"regexp".m·1.@"regexp".inputBytes.@"regexp".str = nil; @"regexp".m·1.@"regexp".inputString.@"regexp".str = ""; @"regexp".m·1.@"regexp".inputReader.@"regexp".r = nil; @"regexp".m·1.@"regexp".pool = append(@"regexp".m·1.@"regexp".pool, @"regexp".t·2) } + func (@"regexp".m·1 *@"regexp".machine) @"regexp".init (@"regexp".ncap·2 int) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".match (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? bool) + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputBytes (@"regexp".b·3 []byte) (? 
@"regexp".input) { @"regexp".m·2.@"regexp".inputBytes.@"regexp".str = @"regexp".b·3; return &@"regexp".m·2.@"regexp".inputBytes } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputReader (@"regexp".r·3 @"io".RuneReader) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputReader.@"regexp".r = @"regexp".r·3; @"regexp".m·2.@"regexp".inputReader.@"regexp".atEOT = false; @"regexp".m·2.@"regexp".inputReader.@"regexp".pos = 0x0; return &@"regexp".m·2.@"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputString (@"regexp".s·3 string) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputString.@"regexp".str = @"regexp".s·3; return &@"regexp".m·2.@"regexp".inputString } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".onepass (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? bool) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".step (@"regexp".runq·2 *@"regexp".queue, @"regexp".nextq·3 *@"regexp".queue, @"regexp".pos·4 int, @"regexp".nextPos·5 int, @"regexp".c·6 rune, @"regexp".nextCond·7 @"regexp/syntax".EmptyOp) + type @"regexp".Regexp struct { @"regexp".expr string; @"regexp".prog *@"regexp/syntax".Prog; @"regexp".onepass *@"regexp".onePassProg; @"regexp".prefix string; @"regexp".prefixBytes []byte; @"regexp".prefixComplete bool; @"regexp".prefixRune rune; @"regexp".prefixEnd uint32; @"regexp".cond @"regexp/syntax".EmptyOp; @"regexp".numSubexp int; @"regexp".subexpNames []string; @"regexp".longest bool; @"regexp".mu @"sync".Mutex; @"regexp".machine []*@"regexp".machine } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") Expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 []byte "esc:0x0", @"regexp".src·5 []byte "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") ExpandString (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".src·5 string "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? 
[]byte) + func (@"regexp".re·2 *@"regexp".Regexp) Find (@"regexp".b·3 []byte) (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAll (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllString (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatch (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatchIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatch (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatchIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindIndex (@"regexp".b·3 []byte) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderIndex (@"regexp".r·3 @"io".RuneReader) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderSubmatchIndex (@"regexp".r·3 @"io".RuneReader) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindString (@"regexp".s·3 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringIndex (@"regexp".s·3 string) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatch (@"regexp".s·3 string) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatchIndex (@"regexp".s·3 string) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatch (@"regexp".b·3 []byte) (? [][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatchIndex (@"regexp".b·3 []byte) (? 
[]int) + func (@"regexp".re·3 *@"regexp".Regexp "esc:0x1") LiteralPrefix () (@"regexp".prefix·1 string, @"regexp".complete·2 bool) { return @"regexp".re·3.@"regexp".prefix, @"regexp".re·3.@"regexp".prefixComplete } + func (@"regexp".re·1 *@"regexp".Regexp "esc:0x0") Longest () { @"regexp".re·1.@"regexp".longest = true } + func (@"regexp".re·2 *@"regexp".Regexp) Match (@"regexp".b·3 []byte) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchReader (@"regexp".r·3 @"io".RuneReader) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchString (@"regexp".s·3 string) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") NumSubexp () (? int) { return @"regexp".re·2.@"regexp".numSubexp } + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAll (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllFunc (@"regexp".src·3 []byte, @"regexp".repl·4 func(? []byte) (? []byte) "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteral (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteralString (@"regexp".src·3 string, @"regexp".repl·4 string "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllString (@"regexp".src·3 string, @"regexp".repl·4 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllStringFunc (@"regexp".src·3 string, @"regexp".repl·4 func(? string) (? string) "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) Split (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") String () (? string) { return @"regexp".re·2.@"regexp".expr } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") SubexpNames () (? 
[]string) { return @"regexp".re·2.@"regexp".subexpNames } + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".allMatches (@"regexp".s·2 string, @"regexp".b·3 []byte, @"regexp".n·4 int, @"regexp".deliver·5 func(? []int) "esc:0x0") + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".doExecute (@"regexp".r·3 @"io".RuneReader, @"regexp".b·4 []byte, @"regexp".s·5 string, @"regexp".pos·6 int, @"regexp".ncap·7 int) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".bsrc·5 []byte "esc:0x0", @"regexp".src·6 string "esc:0x0", @"regexp".match·7 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".get () (? *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".pad (@"regexp".a·3 []int "esc:0x2") (? []int) + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".put (@"regexp".z·2 *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".replaceAll (@"regexp".bsrc·3 []byte, @"regexp".src·4 string, @"regexp".nmatch·5 int, @"regexp".repl·6 func(@"regexp".dst []byte, @"regexp".m []int) (? []byte) "esc:0x0") (? []byte) + type @"".routeRegexp struct { @"".template string; @"".matchHost bool; @"".matchQuery bool; @"".strictSlash bool; @"".regexp *@"regexp".Regexp; @"".reverse string; @"".varsN []string; @"".varsR []*@"regexp".Regexp } + func (@"".r·2 *@"".routeRegexp) Match (@"".req·3 *@"net/http".Request, @"".match·4 *@"".RouteMatch "esc:0x0") (? bool) + func (@"".r·2 *@"".routeRegexp "esc:0x0") @"".getUrlQuery (@"".req·3 *@"net/http".Request) (? string) + func (@"".r·2 *@"".routeRegexp) @"".matchQueryString (@"".req·3 *@"net/http".Request) (? bool) + func (@"".r·3 *@"".routeRegexp) @"".url (@"".values·4 map[string]string "esc:0x0") (? string, ? 
error) + type @"".routeRegexpGroup struct { @"".host *@"".routeRegexp; @"".path *@"".routeRegexp; @"".queries []*@"".routeRegexp } + func (@"".v·1 *@"".routeRegexpGroup) @"".setMatch (@"".req·2 *@"net/http".Request, @"".m·3 *@"".RouteMatch "esc:0x0", @"".r·4 *@"".Route "esc:0x0") + type @"".BuildVarsFunc func(? map[string]string) (? map[string]string) + type @"".MatcherFunc func(? *@"net/http".Request, ? *@"".RouteMatch) (? bool) + func (@"".m·2 @"".MatcherFunc "esc:0x0") Match (@"".r·3 *@"net/http".Request, @"".match·4 *@"".RouteMatch) (? bool) + type @"".Route struct { @"".parent @"".parentRoute; @"".handler @"net/http".Handler; @"".matchers []@"".matcher; @"".regexp *@"".routeRegexpGroup; @"".strictSlash bool; @"".buildOnly bool; @"".name string; @"".err error; @"".buildVarsFunc @"".BuildVarsFunc } + func (@"".r·2 *@"".Route "esc:0x2") BuildOnly () (? *@"".Route) { @"".r·2.@"".buildOnly = true; return @"".r·2 } + func (@"".r·2 *@"".Route "esc:0x2") BuildVarsFunc (@"".f·3 @"".BuildVarsFunc) (? *@"".Route) { @"".r·2.@"".buildVarsFunc = @"".f·3; return @"".r·2 } + func (@"".r·2 *@"".Route "esc:0x1") GetError () (? error) { return @"".r·2.@"".err } + func (@"".r·2 *@"".Route "esc:0x1") GetHandler () (? @"net/http".Handler) { return @"".r·2.@"".handler } + func (@"".r·2 *@"".Route "esc:0x1") GetName () (? string) { return @"".r·2.@"".name } + func (@"".r·2 *@"".Route "esc:0x2") Handler (@"".handler·3 @"net/http".Handler) (? *@"".Route) { if @"".r·2.@"".err == nil { @"".r·2.@"".handler = @"".handler·3 }; return @"".r·2 } + func (@"".r·2 *@"".Route "esc:0x2") HandlerFunc (@"".f·3 func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) (? *@"".Route) + func (@"".r·2 *@"".Route) Headers (@"".pairs·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Route) HeadersRegexp (@"".pairs·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Route) Host (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Route) Match (@"".req·3 *@"net/http".Request, @"".match·4 *@"".RouteMatch) (? 
bool) + func (@"".r·2 *@"".Route) MatcherFunc (@"".f·3 @"".MatcherFunc) (? *@"".Route) + func (@"".r·2 *@"".Route) Methods (@"".methods·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Route) Name (@"".name·3 string) (? *@"".Route) + func (@"".r·2 *@"".Route) Path (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Route) PathPrefix (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Route) Queries (@"".pairs·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Route) Schemes (@"".schemes·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Route) Subrouter () (? *@"".Router) + func (@"".r·3 *@"".Route) URL (@"".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"".r·3 *@"".Route) URLHost (@"".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"".r·3 *@"".Route) URLPath (@"".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"".r·2 *@"".Route) @"".addMatcher (@"".m·3 @"".matcher) (? *@"".Route) { if @"".r·2.@"".err == nil { @"".r·2.@"".matchers = append(@"".r·2.@"".matchers, @"".m·3) }; return @"".r·2 } + func (@"".r·2 *@"".Route) @"".addRegexpMatcher (@"".tpl·3 string, @"".matchHost·4 bool, @"".matchPrefix·5 bool, @"".matchQuery·6 bool) (? error) + func (@"".r·2 *@"".Route) @"".buildVars (@"".m·3 map[string]string) (? map[string]string) + func (@"".r·2 *@"".Route) @"".getNamedRoutes () (? map[string]*@"".Route) + func (@"".r·2 *@"".Route) @"".getRegexpGroup () (? *@"".routeRegexpGroup) + func (@"".r·3 *@"".Route) @"".prepareVars (@"".pairs·4 ...string) (? map[string]string, ? error) + type @"".parentRoute interface { @"".buildVars(? map[string]string) (? map[string]string); @"".getNamedRoutes() (? map[string]*@"".Route); @"".getRegexpGroup() (? *@"".routeRegexpGroup) } + type @"".WalkFunc func(@"".route *@"".Route, @"".router *@"".Router, @"".ancestors []*@"".Route) (? 
error) + type @"".Router struct { NotFoundHandler @"net/http".Handler; @"".parent @"".parentRoute; @"".routes []*@"".Route; @"".namedRoutes map[string]*@"".Route; @"".strictSlash bool; KeepContext bool } + func (@"".r·2 *@"".Router) BuildVarsFunc (@"".f·3 @"".BuildVarsFunc) (? *@"".Route) + func (@"".r·2 *@"".Router) Get (@"".name·3 string "esc:0x0") (? *@"".Route) + func (@"".r·2 *@"".Router) GetRoute (@"".name·3 string "esc:0x0") (? *@"".Route) + func (@"".r·2 *@"".Router) Handle (@"".path·3 string, @"".handler·4 @"net/http".Handler) (? *@"".Route) + func (@"".r·2 *@"".Router) HandleFunc (@"".path·3 string, @"".f·4 func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) (? *@"".Route) + func (@"".r·2 *@"".Router) Headers (@"".pairs·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Router) Host (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Router "esc:0x0") Match (@"".req·3 *@"net/http".Request, @"".match·4 *@"".RouteMatch) (? bool) + func (@"".r·2 *@"".Router) MatcherFunc (@"".f·3 @"".MatcherFunc) (? *@"".Route) + func (@"".r·2 *@"".Router) Methods (@"".methods·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Router) NewRoute () (? *@"".Route) { var @"".route·3 *@"".Route; @"".route·3 = (&@"".Route{ @"".parent:@"".r·2, @"".strictSlash:@"".r·2.@"".strictSlash }); @"".r·2.@"".routes = append(@"".r·2.@"".routes, @"".route·3); return @"".route·3 } + func (@"".r·2 *@"".Router) Path (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Router) PathPrefix (@"".tpl·3 string) (? *@"".Route) + func (@"".r·2 *@"".Router) Queries (@"".pairs·3 ...string) (? *@"".Route) + func (@"".r·2 *@"".Router) Schemes (@"".schemes·3 ...string) (? *@"".Route) + func (@"".r·1 *@"".Router) ServeHTTP (@"".w·2 @"net/http".ResponseWriter, @"".req·3 *@"net/http".Request) + func (@"".r·2 *@"".Router "esc:0x2") StrictSlash (@"".value·3 bool) (? 
*@"".Router) { @"".r·2.@"".strictSlash = @"".value·3; return @"".r·2 } + func (@"".r·2 *@"".Router) Walk (@"".walkFn·3 @"".WalkFunc "esc:0x0") (? error) + func (@"".r·2 *@"".Router) @"".buildVars (@"".m·3 map[string]string) (? map[string]string) + func (@"".r·2 *@"".Router) @"".getNamedRoutes () (? map[string]*@"".Route) + func (@"".r·2 *@"".Router) @"".getRegexpGroup () (? *@"".routeRegexpGroup) + func (@"".r·2 *@"".Router) @"".walk (@"".walkFn·3 @"".WalkFunc "esc:0x0", @"".ancestors·4 []*@"".Route) (? error) + func @"".NewRouter () (? *@"".Router) { return (&@"".Router{ @"".namedRoutes:make(map[string]*@"".Route, 0x0), KeepContext:false }) } + var @"".SkipRouter error + func @"".Vars (@"".r·2 *@"net/http".Request "esc:0x0") (? map[string]string) + func @"".CurrentRoute (@"".r·2 *@"net/http".Request "esc:0x0") (? *@"".Route) + func @"".init () + var @"time".months [12]string + var @"time".days [7]string + var @"time".Local *@"time".Location + var @"time".UTC *@"time".Location + var @"bufio".ErrInvalidUnreadRune error + var @"regexp/syntax".instOpNames []string + +$$ +_go_.6 0 0 0 644 311105 ` +go object darwin amd64 go1.4.2 X:precisestack + +! 
+go13lderrors.a +fmt.anet/http.a path.aregexp.aŽgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/context.abytes.anet/url.astrconv.astrings.aþ"".NewRouteràÐeH‹ %H;awèëêHƒì(HH‰$HÇD$èH‹\$H‰\$ HH‰$èH‹L$H‰ÏHƒùtI1ÀèH‰L$H‰ $Hƒ<$t)Hƒ$8H‹\$ H‰\$èH‹D$1í@ˆhAH‰D$0HƒÄ(É%ëΉë³ + 0runtime.morestack_noctxt:2type.map[string]*"".Route^runtime.makemap€type."".Router’"runtime.newobject¼Ü runtime.duffzero„.runtime.writebarrierptrP"".autotmp_0002type.*"".Router"".autotmp_00012type.map[string]*"".Route "".~r0type.*"".RouterP€OP° &–.9/Tgclocals·37da6a443256db8ec55c7210d030a9b0Tgclocals·f6dcde45bff02c6c4b088b594fd52a4c¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ$"".(*Router).Match „eH‹ %H;awèëêHƒìPH‹\$XHƒû„’H‹S H‹C(H‹k0H‰l$H1ÉH‰D$@H‰D$ H‰T$8H‰ÐH‹l$ H9é}YH‰D$0H‹(H‰L$(H‰,$H‹\$`H‰\$H‹\$hH‰\$è¶\$€ût +ÆD$pHƒÄPÃH‹D$0H‹L$(HƒÀHÿÁH‹l$ H9é|§ÆD$pHƒÄPÉégÿÿÿ + 0runtime.morestack_noctxtü""".(*Route).Match@ "".autotmp_0007?type.**"".Route"".autotmp_0006_type.int"".autotmp_0005Otype.int"".autotmp_0003/ type.[]*"".Route "".~r20type.bool"".match &type.*"".RouteMatch "".req,type.*net/http.Request"".rtype.*"".Router$ {Ÿ $Ÿ ÐrK' + + +  +}STgclocals·d69c4140875de858f5dc9e2e8acb0bc0Tgclocals·29f0050a5ee7c2b9348a75428171d7de¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ,"".(*Router).ServeHTTPàÒeH‹ %HD$€H;AwèëåHìH‹´$ H‹~Hƒÿ„ªHw8H<$H¥H¥èH‹L$H‹T$H‰L$HH‹œ$ H‹kHƒý„lH‹M8H‰Œ$ˆH‹E@H‰„$H9Â…bH‹l$HH‰,$H‰T$PH‰T$H‰L$H‰D$èH‹T$P¶\$ €û„-HH‰$èH‹D$HÇD$XHÇD$`H‹œ$H‰$H‹œ$ H‰\$H‰D$0H‰D$èH‹L$0¶\$€û„ÆH‹iH‰l$XH‹iH‰l$`H‹œ$ H‰$H‹iHH‰D$hH‰D$H‰l$pH‰l$èH‹œ$ H‰$H‹\$0H‹+HH‰D$hH‰D$H‰l$pH‰l$èHƒ|$X…H‹œ$Hƒû„+H‹+H‰l$XH‹kH‰l$`Hƒ|$XuVHÇD$8HÇD$@HH‰\$(H‹ 1íH9é„°H‹T$(H‰L$xH‰”$€H‰L$8H‰L$XH‰T$@H‰T$`H‹¬$¶]A€ûu"H‹œ$ H‰$H QjèYYH…ÀuIH‹œ$H‰\$H‹œ$H‰\$H‹œ$ H‰\$H‹\$`H‰$H‹\$XH‹[ ÿÓèHÄÃèHÄÃHH‰$HH‰\$HH‰\$èH‹L$éÿÿÿ‰éÎþÿÿé«þÿÿH‹¼$ 
H‹wHƒþ„ÌH¼$˜èH‹\$HH‰œ$ÐH‰T$PH‰”$ØHœ$˜H‰$èH‹T$H‹L$H‰T$HH‰L$PH‹œ$H‰$H‹œ$H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥H‹\$HH‰\$H‹\$PH‰\$ èHÇD$-H‹œ$H‰$H‹œ$H‹[0ÿÓèHÄÉé-ÿÿÿ‰EéŒüÿÿ‰éOüÿÿ: +*0runtime.morestack_noctxtŠ"".cleanPathÚ runtime.eqstringŽ$type."".RouteMatch "runtime.newobjectž$"".(*Router).Match–,type.map[string]stringÈ"".setVarsþtype.*"".Route°$"".setCurrentRouteÐ(net/http.NotFound·fèZgo.itab.net/http.HandlerFunc.net/http.Handler’ œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Clear·f¢ "runtime.deferprocª + +² +&runtime.deferreturnÎ +&runtime.deferreturnì +2type.net/http.HandlerFunc‚ *type.net/http.Handlerš Zgo.itab.net/http.HandlerFunc.net/http.Handler®  runtime.typ2Itab  ” runtime.duffcopyö *net/url.(*URL).StringÖ +ò (go.string."Location"¼&net/http.Header.Set† +Ž&runtime.deferreturn@€"".autotmp_0015*type.net/http.Handler"".autotmp_0014type.string"".autotmp_0013¯2type.net/http.HandlerFunc"".autotmp_0010type.string"".autotmp_0009ïtype.string"".&matchŸ&type.*"".RouteMatch "".~r0*type.net/http.Handler"".handlerÏ*type.net/http.Handler "".urlÏ type.net/url.URL"".pïtype.string "".req0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".rtype.*"".RouterD"€¬Mÿ€ ÿ€Ÿÿ€ðvŒ"¢;04 #V";2#%O#  2Dh#?‰þAd12STgclocals·7a383875e23784cb158d762414ce6278Tgclocals·4c1561a135d5ed5147fd4ff64ff73c94¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ "".(*Router).Get€äeH‹ %H;awèëêHƒì8H‹\$@H‰$èH‹D$H‹T$HH‹L$PHH‰$H‰D$H‰T$(H‰T$H‰L$0H‰L$èH‹\$ H‹+H‰l$XHƒÄ8à + + 0runtime.morestack_noctxtH6"".(*Router).getNamedRoutest2type.map[string]*"".Route¸4runtime.mapaccess1_faststr@p"".autotmp_0023type.string "".~r10type.*"".Route"".nametype.string"".rtype.*"".RouterpWo€ Òf 
+#]Tgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ*"".(*Router).GetRoute€äeH‹ %H;awèëêHƒì8H‹\$@H‰$èH‹D$H‹T$HH‹L$PHH‰$H‰D$H‰T$(H‰T$H‰L$0H‰L$èH‹\$ H‹+H‰l$XHƒÄ8à + + 0runtime.morestack_noctxtH6"".(*Router).getNamedRoutest2type.map[string]*"".Route¸4runtime.mapaccess1_faststr@p"".autotmp_0026type.string "".~r10type.*"".Route"".nametype.string"".rtype.*"".RouterpWo€ Þf +#]Tgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ0"".(*Router).StrictSlash@(H‹D$¶l$@ˆh@H‰D$Ã0 "".~r1 type.*"".Router"".valuetype.bool"".rtype.*"".Router  ‚ Tgclocals·64b411f0f44be3f38c26e84fc3239091Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ6"".(*Router).getNamedRoutesÀ´eH‹ %H;awèëêHƒì(H‹D$0H‹X81íH9ëuLHƒxt\H‹HH‹hH‰l$ H‰,$H‰L$H‹Y(ÿÓH‹D$H‹\$0H‰$Hƒ<$t"Hƒ$8H‰D$èH‹D$0H‹h8H‰l$8HƒÄ(É%ëÕHH‰$HÇD$èH‹D$H‹\$0H‰$Hƒ<$tHƒ$8H‰D$èH‹D$0륉%ëá + 0runtime.morestack_noctxt– +Ú.runtime.writebarrierptr 2type.map[string]*"".RouteÄruntime.makemapŒ.runtime.writebarrierptr P"".autotmp_00292type.map[string]*"".Route "".~r02type.map[string]*"".Route"".rtype.*"".RouterPiOP\à(– E + B K•Tgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ6"".(*Router).getRegexpGroupÀ¾eH‹ %H;awèëêHƒì H‹D$(Hƒxt+H‹HH‹hH‰l$H‰,$H‰L$H‹Y0ÿÓH‹\$H‰\$0HƒÄ ÃHÇD$0HƒÄ à + 0runtime.morestack_noctxt€ + @ "".~r02type.*"".routeRegexpGroup"".rtype.*"".Router@6?@ ?`®+ +@ 
Tgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ,"".(*Router).buildVarsÀ¶eH‹ %H;awèëêHƒì(H‹L$0H‹D$8Hƒyt&H‹QH‹iH‰D$H‰l$ H‰,$H‰T$H‹Z ÿÓH‹D$H‰D$@HƒÄ(à + 0runtime.morestack_noctxt” +0P "".~r1 ,type.map[string]string"".m,type.map[string]string"".rtype.*"".RouterP@O`¼$& +JTgclocals·d3486bc7ce1948dc22d7ad1c0be2887aTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ*"".(*Router).NewRouteàÈeH‹ %HD$èH;AwèëåHì˜H‹œ$ H‰\$PHH‰$èH‹|$H‰ùH‰|$HHƒÿ„Ç1ÀèH‹1íH9è„wH‰ $Hƒ<$„\H‹L$PH‰D$XH‰D$H‰L$`H‰L$èH‹œ$ H‹D$HHƒø„ ¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$€H‰Œ$ˆH‰œ$H‰ØH)ËHƒû}FHH‰$H‰T$hH‰T$H‰L$pH‰L$H‰D$xH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$pH‰D$xH‰T$hHÊH‰$H‹\$@H‰\$èH‹T$hH‹L$pH‹D$xH‹œ$ H‰$Hƒ<$tFHƒ$ H‰”$€H‰T$H‰Œ$ˆH‰L$H‰„$H‰D$èH‹\$@H‰œ$¨HĘÉ%뱉éÙþÿÿ‰%é˜þÿÿHH‰$HH‰\$HH‰\$èH‹L$HH‹D$éRþÿÿ‰é2þÿÿ +*0runtime.morestack_noctxtdtype."".Routev"runtime.newobject²È runtime.duffzeroÀBgo.itab.*"".Router."".parentRoute°2runtime.writebarrierifaceæ type.[]*"".RouteÆ"runtime.growslice¼.runtime.writebarrierptrâ2runtime.writebarriersliceÒtype.*"".Routerè&type."".parentRoute€Bgo.itab.*"".Router."".parentRoute” runtime.typ2Itab °"".autotmp_0040_ type.[]*"".Route"".autotmp_0039/ type.[]*"".Route"".autotmp_0037Ÿtype.*"".Route"".autotmp_0036 type.[]*"".Route"".autotmp_0035type.*"".Router"".route¯type.*"".Route "".~r0type.*"".Route"".rtype.*"".Router"°§¯°f° Ô"žõ ] :]‹;SY'Tgclocals·0d6246443c3fddb7ffb759a83afd407dTgclocals·691c0cb9316c0a5f7d8580c74ac115f2¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ&"".(*Router).Handle  † eH‹ %HD$ÈH;AwèëåHì¸H‹„$ÀH‰D$PH‰D$`HH‰$èH‹|$H‰ùH‰|$XHƒÿ„a1ÀèH‹1íH9è„H‰ 
$Hƒ<$„öH‹L$`H‰D$xH‰D$H‰Œ$€H‰L$èH‹\$PH‹D$XHƒø„º¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ H‰Œ$¨H‰œ$°H‰ØH)ËHƒû}OHH‰$H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$H‰„$˜H‰”$ˆHÊH‰$H‹\$@H‰\$èH‹”$ˆH‹Œ$H‹„$˜H‹\$PH‰$Hƒ<$„ÁHƒ$ H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$èH‹\$@H‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$èH‹D$H‹”$ØH‹Œ$àHƒxXu3H‰D$HH‰$Hƒ<$t3Hƒ$H‰T$hH‰T$H‰L$pH‰L$èH‹D$HH‰„$èHĸÉ%ëĉ%é3ÿÿÿ‰é?þÿÿ‰%éþýÿÿHH‰$HH‰\$HH‰\$èH‹L$XH‹D$é¸ýÿÿ‰é˜ýÿÿ" +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteÀ2runtime.writebarrierifaceð type.[]*"".Routeâ"runtime.growsliceê.runtime.writebarrierptr¤2runtime.writebarriersliceô "".(*Route).Pathˆ 2runtime.writebarrieriface +type.*"".Router¦ +&type."".parentRoute¾ +Bgo.itab.*"".Router."".parentRouteÒ + runtime.typ2Itab`ð"".autotmp_0056_ type.[]*"".Route"".autotmp_0055/ type.[]*"".Route"".autotmp_0053¿type.*"".Route"".autotmp_0052type.*"".Route"".autotmp_0051 type.[]*"".Route"".autotmp_0050¯type.*"".Router"".handlerŸ*type.net/http.Handler"".rßtype.*"".Route"".routeïtype.*"".Route"".rÏtype.*"".Router "".~r2Ptype.*"".Route"".handler0*type.net/http.Handler"".pathtype.string"".rtype.*"".Router"ðºïðsÐä"®(?`‘D]( Je (Tgclocals·259efa0f9d5b5ab4cbb1f7201749d3e1Tgclocals·8cdbdba615b2fb90357456ca3f2cb9a4¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ."".(*Router).HandleFunc€ +ì eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„1ÀèH‹1íH9è„ÄH‰ $Hƒ<$„©H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„p¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$„‰Hƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹\$H‰$H‹œ$ÀH‰\$èH‹\$H‰œ$ÈHĠÉ%ékÿÿÿ‰é‰þÿÿ‰%éKþÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$éþÿÿ‰éåýÿÿ" +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê 
type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrú2runtime.writebarriersliceÊ "".(*Route).Path€."".(*Route).HandlerFuncötype.*"".RouterŒ &type."".parentRoute¤ Bgo.itab.*"".Router."".parentRoute¸  runtime.typ2ItabPÀ"".autotmp_0073_ type.[]*"".Route"".autotmp_0072/ type.[]*"".Route"".autotmp_0070Ÿtype.*"".Route"".autotmp_0069type.*"".Route"".autotmp_0068type.*"".Route"".autotmp_0067 type.[]*"".Route"".autotmp_0066type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r2@type.*"".Route"".f0jtype.func(net/http.ResponseWriter, *net/http.Request)"".pathtype.string"".rtype.*"".Router"Àö¿Àg€ò"Þ$?]‹>W( w%Tgclocals·1c7793dad628d89b0b03aa7a6b5e8ac7Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ("".(*Router).Headersà È eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„1ÀèH‹1íH9脲H‰ $Hƒ<$„—H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„^¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$t{Hƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹\$ H‰œ$ÈHĠÉ%éyÿÿÿ‰é›þÿÿ‰%é]þÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$éþÿÿ‰é÷ýÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarriersliceÜ&"".(*Route).HeadersÒtype.*"".Routerè&type."".parentRoute€ Bgo.itab.*"".Router."".parentRoute”  runtime.typ2ItabPÀ"".autotmp_0089_ type.[]*"".Route"".autotmp_0088/ type.[]*"".Route"".autotmp_0086Ÿtype.*"".Route"".autotmp_0085type.*"".Route"".autotmp_0084 type.[]*"".Route"".autotmp_0083type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r1@type.*"".Route"".pairstype.[]string"".rtype.*"".Router"Àä¿Àiðþ"Î$?]‹>S5 
\'Tgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ""".(*Router).HostÀ ¨ eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„ò1ÀèH‹1íH9è„¢H‰ $Hƒ<$„‡H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„N¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$tnHƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹\$H‰œ$ÀHĠÉ%뉉é«þÿÿ‰%émþÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$é'þÿÿ‰éþÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarrierslice "".(*Route).Host²type.*"".RouterÈ&type."".parentRouteàBgo.itab.*"".Router."".parentRouteô runtime.typ2Itab@À"".autotmp_0105_ type.[]*"".Route"".autotmp_0104/ type.[]*"".Route"".autotmp_0102Ÿtype.*"".Route"".autotmp_0101type.*"".Route"".autotmp_0100 type.[]*"".Route"".autotmp_0099type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r10type.*"".Route "".tpltype.string"".rtype.*"".Router"À׿ÀfàŠ"¾$?]‹>S( Y'Tgclocals·ccff1a4364f53102a1b73e3274c6c0d4Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ0"".(*Router).MatcherFunc  Ž eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„å1ÀèH‹1íH9è„•H‰ $Hƒ<$„zH‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„A¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$taHƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$èH‹\$H‰œ$¸HĠÉ%ë–‰é¸þÿÿ‰%ézþÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$é4þÿÿ‰éþÿÿ 
+*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarrierslice¨."".(*Route).MatcherFunc˜type.*"".Router®&type."".parentRouteÆBgo.itab.*"".Router."".parentRouteÚ runtime.typ2Itab0À"".autotmp_0121_ type.[]*"".Route"".autotmp_0120/ type.[]*"".Route"".autotmp_0118Ÿtype.*"".Route"".autotmp_0117type.*"".Route"".autotmp_0116 type.[]*"".Route"".autotmp_0115type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r1 type.*"".Route"".f&type."".MatcherFunc"".rtype.*"".Router"ÀÊ¿ÀcЖ"®$?]‹>S Y$Tgclocals·6b6fbfacf80ed81d2be06478c8f1790dTgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ("".(*Router).Methodsà È eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„1ÀèH‹1íH9脲H‰ $Hƒ<$„—H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„^¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$t{Hƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹\$ H‰œ$ÈHĠÉ%éyÿÿÿ‰é›þÿÿ‰%é]þÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$éþÿÿ‰é÷ýÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarriersliceÜ&"".(*Route).MethodsÒtype.*"".Routerè&type."".parentRoute€ Bgo.itab.*"".Router."".parentRoute”  runtime.typ2ItabPÀ"".autotmp_0137_ type.[]*"".Route"".autotmp_0136/ type.[]*"".Route"".autotmp_0134Ÿtype.*"".Route"".autotmp_0133type.*"".Route"".autotmp_0132 type.[]*"".Route"".autotmp_0131type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r1@type.*"".Route"".methodstype.[]string"".rtype.*"".Router"Àä¿Àið¢"Î$?]‹>S5 
\'Tgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ""".(*Router).PathÀ ¨ eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„ò1ÀèH‹1íH9è„¢H‰ $Hƒ<$„‡H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„N¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$tnHƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹\$H‰œ$ÀHĠÉ%뉉é«þÿÿ‰%émþÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$é'þÿÿ‰éþÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarrierslice "".(*Route).Path²type.*"".RouterÈ&type."".parentRouteàBgo.itab.*"".Router."".parentRouteô runtime.typ2Itab@À"".autotmp_0153_ type.[]*"".Route"".autotmp_0152/ type.[]*"".Route"".autotmp_0150Ÿtype.*"".Route"".autotmp_0149type.*"".Route"".autotmp_0148 type.[]*"".Route"".autotmp_0147type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r10type.*"".Route "".tpltype.string"".rtype.*"".Router"À׿Àfà®"¾$?]‹>S( Y'Tgclocals·ccff1a4364f53102a1b73e3274c6c0d4Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ."".(*Router).PathPrefixÀ ¨ eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„ò1ÀèH‹1íH9è„¢H‰ $Hƒ<$„‡H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„N¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$tnHƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹\$H‰œ$ÀHĠÉ%뉉é«þÿÿ‰%émþÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$é'þÿÿ‰éþÿÿ 
+*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarriersliceÂ,"".(*Route).PathPrefix²type.*"".RouterÈ&type."".parentRouteàBgo.itab.*"".Router."".parentRouteô runtime.typ2Itab@À"".autotmp_0169_ type.[]*"".Route"".autotmp_0168/ type.[]*"".Route"".autotmp_0166Ÿtype.*"".Route"".autotmp_0165type.*"".Route"".autotmp_0164 type.[]*"".Route"".autotmp_0163type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r10type.*"".Route "".tpltype.string"".rtype.*"".Router"À׿Àfàº"¾$?]‹>S( Y'Tgclocals·ccff1a4364f53102a1b73e3274c6c0d4Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ("".(*Router).Queriesà È eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„1ÀèH‹1íH9脲H‰ $Hƒ<$„—H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„^¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$t{Hƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹\$ H‰œ$ÈHĠÉ%éyÿÿÿ‰é›þÿÿ‰%é]þÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$éþÿÿ‰é÷ýÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarriersliceÜ&"".(*Route).QueriesÒtype.*"".Routerè&type."".parentRoute€ Bgo.itab.*"".Router."".parentRoute”  runtime.typ2ItabPÀ"".autotmp_0185_ type.[]*"".Route"".autotmp_0184/ type.[]*"".Route"".autotmp_0182Ÿtype.*"".Route"".autotmp_0181type.*"".Route"".autotmp_0180 type.[]*"".Route"".autotmp_0179type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r1@type.*"".Route"".pairstype.[]string"".rtype.*"".Router"Àä¿ÀiðÆ"Î$?]‹>S5 
\'Tgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ("".(*Router).Schemesà È eH‹ %HD$àH;AwèëåHì H‹„$¨H‰D$HH‰D$XHH‰$èH‹|$H‰ùH‰|$PHƒÿ„1ÀèH‹1íH9脲H‰ $Hƒ<$„—H‹L$XH‰D$`H‰D$H‰L$hH‰L$èH‹\$HH‹D$PHƒø„^¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$ˆH‰Œ$H‰œ$˜H‰ØH)ËHƒû}IHH‰$H‰T$pH‰T$H‰L$xH‰L$H‰„$€H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$xH‰„$€H‰T$pHÊH‰$H‹\$@H‰\$èH‹T$pH‹L$xH‹„$€H‹\$HH‰$Hƒ<$t{Hƒ$ H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$èH‹\$@H‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹\$ H‰œ$ÈHĠÉ%éyÿÿÿ‰é›þÿÿ‰%é]þÿÿHH‰$HH‰\$HH‰\$èH‹L$PH‹D$éþÿÿ‰é÷ýÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÐ"runtime.growsliceÌ.runtime.writebarrierptrò2runtime.writebarriersliceÜ&"".(*Route).SchemesÒtype.*"".Routerè&type."".parentRoute€ Bgo.itab.*"".Router."".parentRoute”  runtime.typ2ItabPÀ"".autotmp_0201_ type.[]*"".Route"".autotmp_0200/ type.[]*"".Route"".autotmp_0198Ÿtype.*"".Route"".autotmp_0197type.*"".Route"".autotmp_0196 type.[]*"".Route"".autotmp_0195type.*"".Router"".route¿type.*"".Route"".r¯type.*"".Router "".~r1@type.*"".Route"".schemestype.[]string"".rtype.*"".Router"Àä¿ÀiðÒ"Î$?]‹>S5 \'Tgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0Tgclocals·f24e5ae57611d01ccf1f96d64c337e04¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ4"".(*Router).BuildVarsFuncà Ú eH‹ %HD$ØH;AwèëåHì¨H‹„$°H‰D$PH‰D$`HH‰$èH‹|$H‰ùH‰|$XHƒÿ„ 1ÀèH‹1íH9è„»H‰ $Hƒ<$„ H‹L$`H‰D$hH‰D$H‰L$pH‰L$èH‹\$PH‹D$XHƒø„g¶k@@ˆh@H‰D$@H‹S H‹K(H‹[0H‰”$H‰Œ$˜H‰œ$ H‰ØH)ËHƒû}LHH‰$H‰T$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$€H‰„$ˆH‰T$xHÊH‰$H‹\$@H‰\$èH‹T$xH‹Œ$€H‹„$ˆH‹\$PH‰$Hƒ<$t{Hƒ$ H‰”$H‰T$H‰Œ$˜H‰L$H‰„$ H‰D$èH‹L$@H‹„$¸H‰L$HH‰ 
$Hƒ<$t$Hƒ$hH‰D$èH‹\$HH‰œ$ÀHĨÉ%ëÓ‰%éyÿÿÿ‰é’þÿÿ‰%éTþÿÿHH‰$HH‰\$HH‰\$èH‹L$XH‹D$éþÿÿ‰éîýÿÿ +*0runtime.morestack_noctxtntype."".Route€"runtime.newobject¼È runtime.duffzeroÊBgo.itab.*"".Router."".parentRouteº2runtime.writebarrierifaceê type.[]*"".RouteÖ"runtime.growsliceØ.runtime.writebarrierptr„2runtime.writebarriersliceÜ.runtime.writebarrierpträtype.*"".Routerú&type."".parentRoute’ Bgo.itab.*"".Router."".parentRoute¦  runtime.typ2Itab0Ð"".autotmp_0216_ type.[]*"".Route"".autotmp_0215/ type.[]*"".Route"".autotmp_0213Ÿtype.*"".Route"".autotmp_0212 type.[]*"".Route"".autotmp_0211type.*"".Router"".r¿type.*"".Route"".routeÏtype.*"".Route"".r¯type.*"".Router "".~r1 type.*"".Route"".f*type."".BuildVarsFunc"".rtype.*"".Router"ÐäÏÐiðÞ"Î$?]ŽAV,e Tgclocals·776d19cc6eced68e652f85d577f321c6Tgclocals·38f35918b64660b95e0269a6592b7ed4¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ""".(*Router).WalkÀ¾eH‹ %H;awèëêHƒìPHÇD$hHÇD$pHH‰$èH‹l$HƒýtSH‹\$X1Ò1ÉH‰$H‹\$`H‰\$H‰l$8H‰l$H‰T$@H‰T$H‰L$HH‰L$ èH‹L$(H‹D$0H‰L$hH‰D$pHƒÄPÉEë¨ + + 0runtime.morestack_noctxt^"type.[0]*"".Routep"runtime.newobjectú""".(*Router).walk@ "".autotmp_0227/ type.[]*"".Route "".~r1 type.error"".walkFn type."".WalkFunc"".rtype.*"".Router Ÿ   ì,t +7iTgclocals·e0dd5664695c71438932a711825a98a4Tgclocals·0528ab8f76149a707fd2f0025c2178a3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ""".(*Router).walkàÖeH‹ %H„$hÿÿÿH;AwèëâHìHÇ„$HHÇ„$PH‹œ$ Hƒû„•H‹K H‹C(H‹k0H‰¬$HÇD$XH‰„$H‰D$PH‰Œ$H‰L$xH‹\$XH‹l$PH9ëH‹\$xH‹H‹X81íH9ë„ÉH‹h8H‹]1íH9넶H‹h8L‹EI‹XHƒû„ H‰D$`H‰$H‹œ$ H‰\$H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$ H‹”$(H‹ÿÓH‹D$(H‹L$0H‰Œ$¸H‰„$°H‹-H9èu~H‰$H‰L$H‹-H‰l$H‹-H‰l$è¶\$ €ûtNH‹\$xHƒÃH‰\$xH‹\$XHÿÃH‰\$XH‹\$XH‹l$PH9ëŒòþÿÿHÇ„$HHÇ„$PHÄÃH‹\$`Hƒû„H‹S H‹C(H‹k0H‰¬$ø1ÉH‰„$ðH‰D$@H‰”$èH‰ÐH‹l$@H9éøH‰D$pHƒø„µH‹H‹hH‰L$HH‰”$ÀH‰¬$ÈHH‰$H‰”$€H‰T$H‰¬$ˆH‰l$èH‹L$¶\$ €ûtuH‰ 
$H‹œ$(H‰\$H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$ èH‹L$(H‹D$0H‰„$¨HƒùH‰Œ$ tH‰Œ$HH‰„$PHÄÃH‹D$pH‹L$HHƒÀHÿÁH‹l$@H9éŒÿÿÿHH‰$H‹|$`Hƒÿ„«HoH|$H‰îH¥H¥èH‹\$H‰\$h¶\$ €û„(þÿÿH‹”$0H‹Œ$8H‹œ$@H‰ØH)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ØH‰„$àH‰”$ÐHÊH‰$H‹\$`H‰\$èH‹”$ÐH‹Œ$ØH‹„$àH‹\$hH‰$H‹œ$(H‰\$H‰”$0H‰T$H‰Œ$8H‰L$H‰„$@H‰D$ èH‹L$(H‹D$0H‰„$˜HƒùH‰Œ$tH‰Œ$HH‰„$PHÄÃH‹„$8HÿÈH‹Œ$@H9Ár%H‹”$0H‰”$0H‰„$8H‰Œ$@é±üÿÿè ‰éNþÿÿ‰éDýÿÿ‰éòüÿÿ‰édûÿÿ" +00runtime.morestack_noctxt +€"".SkipRouterª"".SkipRouterÂ"".SkipRouterÖruntime.ifaceeq€ type.*"".RouterÆ $runtime.assertI2T2Þ +""".(*Router).walkš type.*"".Routerê $runtime.assertI2T2ð  type.[]*"".Routeâ"runtime.growsliceê.runtime.writebarrierptrž""".(*Router).walk’$runtime.panicslicep°8"".autotmp_0254type.uint64"".autotmp_0253type.uint64"".autotmp_0252type.int"".autotmp_0249type.int"".autotmp_0248type.int"".autotmp_0247 type.[]*"".Route"".autotmp_0246¯type."".matcher"".autotmp_0245Ï type.*"".matcher"".autotmp_0244¯type.int"".autotmp_0243Ÿtype.int"".autotmp_0241¿type.**"".Route"".autotmp_0240type.int"".autotmp_0239ÿtype.int"".autotmp_0238type.int"".autotmp_0237type.error"".autotmp_0235type.error"".autotmp_0234_"type.[]"".matcher"".autotmp_0232/ type.[]*"".Route "".errtype.error"".hßtype.*"".Router "".errïtype.error +"".sr¯type."".matcher "".errÏtype.error"".tïtype.*"".Route "".~r2Ptype.error"".ancestors  type.[]*"".Route"".walkFn type."".WalkFunc"".rtype.*"".Router6%°¡¯°£¯°Ÿ¯°eð f†=d8\D .4 !v9OGÀT8-.  
(¡ÁÒ|DZz (Tgclocals·02f53cdec99f366e42fb544f32ed9035Tgclocals·e1370d8c0370fc841121204684c0e45d¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".VarsààeH‹ %H;awèëêHƒì@HÇD$(HH‰$H\$(H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$HH‰$èH‹D$H‹L$ Hƒøt3HH‰$H‰D$0H‰D$H‰L$8H‰L$èH‹\$H‰\$PHƒÄ@ÃHÇD$PHƒÄ@à + 0runtime.morestack_noctxtL$type."".contextKeyrruntime.convT2E¶’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Getä,type.map[string]stringž"runtime.assertE2T €"".autotmp_0261/$type."".contextKey +"".rv"type.interface {} "".~r1,type.map[string]string"".r,type.*net/http.Request€‡€ °èU3 +8xTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".CurrentRouteààeH‹ %H;awèëêHƒì@HÇD$(HH‰$H\$(H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$HH‰$èH‹D$H‹L$ Hƒøt3HH‰$H‰D$0H‰D$H‰L$8H‰L$èH‹\$H‰\$PHƒÄ@ÃHÇD$PHƒÄ@à + 0runtime.morestack_noctxtL$type."".contextKeyrruntime.convT2E¶’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Getätype.*"".Routež"runtime.assertE2T €"".autotmp_0263/$type."".contextKey +"".rv"type.interface {} "".~r1type.*"".Route"".r,type.*net/http.Request€‡€ °€U3 +8xTgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".setVars€ðeH‹ %H;awèëêHƒì0HÇD$(HH‰$H\$(H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$ èHƒÄ0à + + 0runtime.morestack_noctxtL$type."".contextKeyrruntime.convT2EÞ’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Set0`"".autotmp_0265$type."".contextKey "".val"type.interface {}"".r,type.*net/http.Request`]_ €ŽY  
+8HTgclocals·9b807a1de79759fa48658b2ca8ff7282Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ$"".setCurrentRoute€ðeH‹ %H;awèëêHƒì0HÇD$(HH‰$H\$(H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$8H‰$H‹\$@H‰\$H‹\$HH‰\$ èHƒÄ0à + + 0runtime.morestack_noctxtL$type."".contextKeyrruntime.convT2EÞ’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Set0`"".autotmp_0266$type."".contextKey "".val"type.interface {}"".r,type.*net/http.Request`]_ €–Y  +8HTgclocals·9b807a1de79759fa48658b2ca8ff7282Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".cleanPath ’eH‹ %H;awèëêHƒìPH‹L$XH‹D$`HÇD$hHÇD$pHƒøuHH‹+H‰l$hH‹kH‰l$pHƒÄPÃHƒø†¶€û/t.HH,$H‰ïH‰ÞH¥H¥H‰L$H‰D$èH‹L$ H‹D$(H‰L$XH‰ $H‰D$`H‰D$èH‹t$`H‹T$H‹D$H‰õHÿÍH‹\$XH9õƒ§H+¶€û/uHHƒøuQH‰T$0H‰$H‰D$8H‰D$H-LD$L‰ÇH‰îH¥H¥èH‹T$0H‹D$8¶\$ €ûtH‰T$hH‰D$pHƒÄPÃH‰T$0H‰D$8H‰T$@H‰$H‰D$HH‰D$HHl$H‰ïH‰ÞH¥H¥èH‹T$ H‹D$(ë­è è  + 0runtime.morestack_noctxt~go.string."/"Ügo.string."/"–*runtime.concatstring2Úpath.Cleanøgo.string."/"  runtime.eqstring®go.string."/"Ö*runtime.concatstring2ø$runtime.panicindex†$runtime.panicindex@  +"".autotmp_0269type.string"".autotmp_0267type.string +"".np?type.string "".~r1 type.string"".ptype.string& >Ÿ ÝŸ Y4ª6.'hB Š…[&Tgclocals·771157e6981a4b26b64a947269cc9ecbTgclocals·29f0050a5ee7c2b9348a75428171d7de¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".uniqueVars€ æ eH‹ %H„$pÿÿÿH;AwèëâHìHÇ„$HHÇ„$PL‹„$H‹„$ H‹œ$(H‰œ$E1ÉH‰„$H‰D$HL‰„$øH‹l$HI9éAL‰D$`Iƒø„YI‹I‹xL‰L$PH‰L$xH‹Œ$0H‹„$8H‹œ$@H‰œ$ð1ÒH‰„$èH‰D$8H‰Œ$àH‹l$8H9êÇH‰L$XHƒù„íH‹1H‹AH‰T$@H‰´$˜H‰„$ H9Ç…~H‹l$xH‰,$H‰¼$€H‰|$H‰t$hH‰t$H‰D$pH‰D$èL‹L$PL‹D$`H‹¼$€H‹T$@H‹L$X¶\$ 
€û„%H‹\$hH‰œ$¨H‹\$pH‰œ$°Hœ$¸HÇHÇCHœ$¸Hƒû„ÛHÇÂHÇÁH‰œ$ÈH‰”$ÐH‰Œ$ØHH‰$Hœ$¨H‰\$èH‹L$H‹D$H‹œ$ÈH‰$H‰Œ$ˆH‰L$H‰„$H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$H‹œ$ÐH‰\$H‹œ$ØH‰\$ èH‹L$(H‹D$0H‰Œ$HH‰„$PHÄÉéÿÿÿHƒÁHÿÂH‹l$8H9êŒ9þÿÿIƒÀIÿÁH‹l$HI9錿ýÿÿHÇ„$HHÇ„$PHÄÉé þÿÿA‰éŸýÿÿ +00runtime.morestack_noctxtŽ runtime.eqstringÂtype.stringîruntime.convT2EØ2runtime.writebarrierifaceæZgo.string."mux: duplicated route variable %q"Ú fmt.Errorf€ &"".autotmp_0285"type.interface {}"".autotmp_0283&type.[]interface {}"".autotmp_0282type.string"".autotmp_0281ïtype.*string"".autotmp_0280¯type.int"".autotmp_0279Ÿtype.int"".autotmp_0278ïtype.string"".autotmp_0277ßtype.*string"".autotmp_0276type.int"".autotmp_0275ÿtype.int"".autotmp_0273Ïtype.string"".autotmp_0272¯(type.[1]interface {}"".autotmp_0271_type.[]string"".autotmp_0270/type.[]string +"".v2Ïtype.string +"".v1¯type.string "".~r2`type.error +"".s20type.[]string +"".s1type.[]string&% íŸ PŸ €.Ì=fpb¥ Æ°v”Tgclocals·90aaa11a3c4e552027084aaae119235bTgclocals·a4b09b32f70466d9a6c07b8385c51f8a¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ"".checkPairsàÌeH‹ %HD$ðH;AwèëåHìH‹”$ HÇ„$¸HÇ„$ÀH‰ÑHÁù?H‰ÓH‰T$8H)ËHƒãHËHƒû„H‹œ$˜H‰\$xH‰”$€H‹œ$¨H‰œ$ˆH\$PHÇHÇCH\$PHƒû„ÊHÇÂHÇÁH‰\$`H‰T$hH‰L$pHH‰$H\$xH‰\$èH‹L$H‹D$H‹\$`H‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$`H‰\$H‹\$hH‰\$H‹\$pH‰\$ èH‹L$(H‹D$0H‹\$8H‰œ$°H‰Œ$¸H‰„$ÀHÄÉé/ÿÿÿH‰”$°HÇ„$¸HÇ„$ÀHÄà +*0runtime.morestack_noctxtštype.[]stringÀruntime.convT2E˜2runtime.writebarrieriface¦†go.string."mux: number of parameters must be multiple of 2, got %v"ˆfmt.Errorf` "".autotmp_0296Ÿ"type.interface {}"".autotmp_0294_&type.[]interface {}"".autotmp_0293type.int"".autotmp_0291/type.[]string"".autotmp_0290(type.[1]interface {}"".length¯type.int "".~r2@type.error "".~r10type.int"".pairstype.[]string " ÔŸ .Ÿ 
°âB#™2ßdmTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216Tgclocals·7876b70d8da64fa07ca2fd3ecc71f905¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ."".mapFromPairsToString€€eH‹ %H;awèëêHƒìXHÇ„$€HÇ„$ˆH‹\$`H‰$H‹\$hH‰\$H‹\$pH‰\$èH‹L$H‹D$ H‹T$(H‰T$PHƒøH‰D$HtHÇD$xH‰„$€H‰”$ˆHƒÄXÃHH‰$H‰ËHÁû?H‰ÍH‰L$0H)ÝH‰ëHÑûH‰\$èH‹T$`H‹L$hH‹\$H‰\$@1ÀH‹l$0H9è}mHH‰$H‹\$@H‰\$H‰ÓH‰ÅH9Ès{HkíHëH‰\$H‰ÃH‰D$8HÿÃH‰ÕH9ËsUHkÛHÝH‰l$èH‹T$`H‹L$hH‹D$8HƒÀH‹l$0H9è|“H‹\$@H‰\$xHÇ„$€HÇ„$ˆHƒÄXÃè è  + 0runtime.morestack_noctxt "".checkPairs¨,type.map[string]stringôruntime.makemapÂ,type.map[string]stringÔ$runtime.mapassign1æ$runtime.panicindexô$runtime.panicindex`°"".i?type.int"".m/,type.map[string]string "".errtype.error"".lengthOtype.int "".~r2@type.error "".~r10,type.map[string]string"".pairstype.[]string&°v¯°à¯°€,ö26 A Z'OÚITgclocals·ca1ebfc68aaed1d083688775167e5178Tgclocals·61e2515c69061b8fed0e66ece719f936¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ,"".mapFromPairsToRegex€îeH‹ %H;awèëêHƒìpHÇ„$˜HÇ„$ H‹\$xH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹L$H‹D$ H‹T$(H‰T$XHƒøH‰D$Pt!HÇ„$H‰„$˜H‰”$ HƒÄpÃHH‰$H‰ËHÁû?H‰ÍH‰L$0H)ÝH‰ëHÑûH‰\$èH‹\$H‰\$@1ÀH‹l$0H9èÞH‰ÃH‰D$8HÿÃH‹l$xL‹„$€L9ÃîHkÛHÝH$H‰ßH‰îH¥H¥èH‹D$H‹L$H‹T$ H‰T$hHƒùH‰L$`t!HÇ„$H‰Œ$˜H‰”$ HƒÄpÃH‰D$HHH‰$H‹\$@H‰\$H‹\$xH‹l$8L‹„$€L9Ås\HkíHëH‰\$H\$HH‰\$èH‹D$8HƒÀH‹l$0H9èŒ"ÿÿÿH‹\$@H‰œ$HÇ„$˜HÇ„$ HƒÄpÃè è  + 0runtime.morestack_noctxt¬"".checkPairsºgo.itab.*bytes.Buffer.io.Writeröè runtime.duffzeroôtype.string runtime.convT2EŠ2runtime.writebarrieriface˜type.stringÄruntime.convT2E¶2runtime.writebarrierifaceÄtype.stringðruntime.convT2Eâ 2runtime.writebarrierifaceÂ!0go.string."%s(?P<%s>%s)"¸"fmt.Fprintf 
#>go.itab.*bytes.Buffer.io.Writerò$type.stringž%runtime.convT2Eˆ&2runtime.writebarrierifaceè&"go.string."%s%%s"Þ'fmt.Fprintfî(4runtime.writebarrierstringÚ*type.string†+runtime.convT2Eð+2runtime.writebarrierifaceþ+ go.string."^%s$"ò,fmt.SprintfÂ-regexp.Compileà..runtime.writebarrierptrÀ1 regexp.QuoteMetaª26bytes.(*Buffer).WriteStringä2 go.string."[/]?"†36bytes.(*Buffer).WriteStringâ3go.string."="–4strings.SplitNæ56bytes.(*Buffer).WriteString¦62bytes.(*Buffer).WriteByteö66bytes.(*Buffer).WriteStringª72bytes.(*Buffer).WriteByteŽ8"go.string.""Ø8regexp.Compileœ;"go.string.""Ø;&type."".routeRegexpê;"runtime.newobjectð<4runtime.writebarrierstring¾>.runtime.writebarrierptr´?4runtime.writebarrierstringÄ@2runtime.writebarriersliceÌA2runtime.writebarrierslicešE2runtime.slicebytetostring‚F$runtime.panicsliceºG2runtime.slicebytetostring‚H$runtime.panicsliceH$runtime.panicindexžH$runtime.panicslice¬H$runtime.panicindexÈH$runtime.panicindexèH$type.*bytes.BufferþHtype.io.Writer–I>go.itab.*bytes.Buffer.io.WriterªI runtime.typ2ItabêI$type.*bytes.Buffer€Jtype.io.Writer˜J>go.itab.*bytes.Buffer.io.Writer¬J runtime.typ2ItabÎMtype.stringúMruntime.convT2EäN2runtime.writebarrierifaceòN\go.string."mux: missing name or pattern in %q"æOfmt.ErrorfÚP$runtime.panicsliceèP$runtime.panicindexöP$runtime.panicindexŽQ$runtime.panicindexœQ$runtime.panicsliceªQ$runtime.panicindex¸Q$runtime.panicindexÆQ$runtime.panicsliceÔQ$runtime.panicindex®R$runtime.panicsliceÊR$runtime.panicsliceˆS"go.string."[^.]+"`° Ð"".autotmp_0416 (type.*"".routeRegexp"".autotmp_0415type.uint64"".autotmp_0414type.uint64"".autotmp_0413type.uint64"".autotmp_0412type.string"".autotmp_0411type.uint64"".autotmp_0410type.uint64"".autotmp_0409type.uint64"".autotmp_0408type.string"".autotmp_0407type.uint64"".autotmp_0406type.uint64"".autotmp_0405type.uint64"".autotmp_0403"type.interface {}"".autotmp_0402*type.*[1]interface {}"".autotmp_0401&type.[]interface {}"".autotmp_0400"type.interface {}"".autotmp_0399*type.*[1]interface 
{}"".autotmp_0398&type.[]interface {}"".autotmp_0397type.*uint8"".autotmp_0396"type.interface {}"".autotmp_0395"type.interface {}"".autotmp_0394"type.interface {}"".autotmp_0392&type.[]interface {}"".autotmp_0391ÿtype.*uint8"".autotmp_0390¿"type.interface {}"".autotmp_0388&type.[]interface {}"".autotmp_0387type.uint64"".autotmp_0386type.uint64"".autotmp_0385type.uint64"".autotmp_0384type.uint64"".autotmp_0383type.uint64"".autotmp_0382type.uint64"".autotmp_0381type.int"".autotmp_0380type.int"".autotmp_0378type.uint64"".autotmp_0377type.uint64"".autotmp_0376$type.*bytes.Buffer"".autotmp_0375$type.*bytes.Buffer"".autotmp_0374ï$type.*bytes.Buffer"".autotmp_0373$type.*bytes.Buffer"".autotmp_0372type.int"".autotmp_0371type.int"".autotmp_0370type.uint64"".autotmp_0369type.uint64"".autotmp_0368type.int"".autotmp_0365type.uint64"".autotmp_0363Ÿtype.string"".autotmp_0361type.[]string"".autotmp_0360type.string"".autotmp_0359type.int"".autotmp_0358type.string"".autotmp_0357type.string"".autotmp_0356(type.[1]interface {}"".autotmp_0355type.string"".autotmp_0354$type.*bytes.Buffer"".autotmp_0353(type.[1]interface {}"".autotmp_0352ÿtype.string"".autotmp_0351ßtype.string"".autotmp_0350type.string"".autotmp_0349$type.*bytes.Buffer"".autotmp_0348_(type.[3]interface {}"".autotmp_0346¿type.string"".autotmp_0345Ÿ(type.[1]interface {}"".autotmp_0344type.int"".autotmp_0343type.[]string"".autotmp_0342type.int"".autotmp_0340type.int"".autotmp_0338type.int"".autotmp_0337type.int "".~r0¿type.stringbytes.b·2Ÿ $type.*bytes.Buffer "".~r0ßtype.stringbytes.s·2¿type.stringbytes.s·2ÿtype.string"strings.suffix·3Ÿtype.stringstrings.s·2ßtype.string"".errCompileßtype.error "".reg¿ &type.*regexp.Regexp"".queryVal¿type.string "".rawÿtype.string"".varIdxÿ type.int"".pattŸtype.string"".nameÿtype.string"".partsŸtype.[]string "".rawßtype.string"".iï type.int "".errŸtype.error "".endß type.int"".reverseÏ $type.*bytes.Buffer"".pattern¯ 
$type.*bytes.Buffer"".varsRÿ*type.[]*regexp.Regexp"".varsNÏtype.[]string"".endSlash +type.bool""".defaultPatternÿtype.string"".templateŸtype.string"".errBraces¿type.error"".idxsïtype.[]int "".~r6@type.error "".~r50(type.*"".routeRegexp"".strictSlash&type.bool"".matchQuery$type.bool"".matchPrefix"type.bool"".matchHost type.bool "".tpltype.stringT%° À¯ ° ¬¯ ° ÿ¯ ° †¯ ° Š¯ ° Ëð)ä8=w$  ÆVVžÃ G¦! ( +ÚÓH¦,1:!S +$w+ +({ 4V09¨€  AAŽ      ÄV¢ÒO<P(<íù(¸Ì³ HŒvw°“HqÉCg;HDç 45\4$3('1Açv:'ÄTgclocals·794bc2a224f980dced8624445883a1f1Tgclocals·33531776c15af84406e52705f6739a4b¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ."".(*routeRegexp).MatchÀ¤eH‹ %H;awèëêHƒì0H‹L$@H‹D$8¶X€ûu^¶X€ûtH‰$H‰L$è¶\$ˆ\$PHƒÄ0ÃH‹hH‰,$H‹yHƒÿt#H_8H|$H‰ÞH¥H¥è¶\$ˆ\$PHƒÄ0ÉëÙH‰ $èH‹L$H‹D$H‹\$8H‹kH‰,$H‰L$ H‰L$H‰D$(H‰D$è¶\$ˆ\$PHƒÄ0à + 0runtime.morestack_noctxt€D"".(*routeRegexp).matchQueryStringê8regexp.(*Regexp).MatchString "".getHost€8regexp.(*Regexp).MatchString@`"".autotmp_0457type.bool"".autotmp_0456type.string"".autotmp_0455type.bool "".~r20type.bool"".match &type.*"".RouteMatch "".req,type.*net/http.Request"".r(type.*"".routeRegexp`7_`4_`J_à¨$  9U ?¡Tgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ*"".(*routeRegexp).url !!eH‹ %H„$àþÿÿH;AwèëâHì HÇ„$¸HÇ„$ÀHÇ„$ÈHÇ„$ÐH‹œ$¨H‹k8HH‰$H‰l$H‰l$èH‹T$H‹L$ H‹D$(H‰”$ H‰Œ$(H‰„$0H‹œ$¨Hƒû„H‹S0H‹K8H‹k@H‰¬$`1ÀH‰Œ$XH‰L$HH‰”$PH‰ÑH‹l$HH9èfH‰L$XHƒù„;H‹H‹iH‰D$PH‰D$@H‰”$€H‰¬$ˆHH‰$H‹œ$°H‰\$H‰”$ÀH‰T$H‰¬$ÈH‰l$èH‹L$ ¶\$(Hƒù„ÈH‹H‰T$pH‹iH‰l$x€û…CH‹œ$€H‰œ$H‹œ$ˆH‰œ$Hœ$HÇHÇCHœ$Hƒû„óHÇÂHÇÁH‰œ$hH‰”$pH‰Œ$xHH‰$Hœ$H‰\$èH‹L$H‹D$H‹œ$hH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$hH‰\$H‹œ$pH‰\$H‹œ$xH‰\$ èH‹L$(H‹D$0HÇ„$¸HÇ„$ÀH‰Œ$ÈH‰„$ÐHÄ ÉéÿÿÿH‰”$H‰¬$HH‰$Hœ$H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹œ$ 
H‹l$@L‹„$(L9ŃHkíHëH‰$èH‹L$XH‹D$PHƒÁHÿÀH‹l$HH9茚ýÿÿH‹¼$¨Hƒÿ„½Ho H<$H‰îH¥H¥H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹L$(H‹D$0H‹œ$¨H‹kH‰,$H‰Œ$ H‰L$H‰„$¨H‰D$è¶\$€û…9H‹œ$¨Hƒû„ H‹S0H‹K8H‹k@H‰¬$`1ÀH‰Œ$XH‰L$HH‰”$PH‰ÑH‹l$HH9èˆH‰L$XHƒù„ÎH‹H‹iH‰D$PH‰D$8H‰”$H‰¬$˜HH‰$H‹œ$°H‰\$H‰”$ÀH‰T$H‰¬$ÈH‰l$èH‹\$ Hƒû„`H‹3H‹SH‹œ$¨Hƒû„@H‹KHH‹CPH‹kXH‰¬$HH‰Œ$8H‹l$8H‰„$@H9ŃHéH‹+H‰,$H‰´$ðH‰t$H‰”$øH‰T$è¶\$€û…pH‹Œ$H‹„$˜HH‰$H‹œ$°H‰\$H‰Œ$àH‰L$H‰„$èH‰D$èH‹\$ Hƒû„H‹+H‰¬$H‹kH‰¬$H‹œ$¨Hƒû„ãH‹KHH‹CPH‹kXH‰¬$HH‰Œ$8H‹l$8H‰„$@H9ŃªHéH‹Hƒû„’H‹ H‹kH‰L$`H‰Œ$ÐH‰l$hH‰¬$ØH¼$€1ÀèHœ$€Hƒû„IHÇÁHÇÂH‰œ$hH‰Œ$pH‰”$xHH‰$Hœ$H‰\$èH‹L$H‹D$H‹œ$hH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èHH‰$Hœ$ÐH‰\$èH‹L$H‹D$H‹œ$hHƒÃH‰$H‰Œ$°H‰L$H‰„$¸H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$hH‰\$H‹œ$pH‰\$H‹œ$xH‰\$ èH‹L$(H‹D$0HÇ„$¸HÇ„$ÀH‰Œ$ÈH‰„$ÐHÄ Éé°þÿÿ‰égþÿÿè ‰éþÿÿ‰éæýÿÿH‹L$XH‹D$PHƒÁHÿÀH‹l$HH9èŒxüÿÿH‹œ$ H‰œ$¸H‹œ$¨H‰œ$ÀHÇ„$ÈHÇ„$ÐHÄ Ãè ‰é¹üÿÿ‰é™üÿÿ‰é+üÿÿ‰éÙûÿÿ뛉é<ûÿÿè ‰é1ùÿÿ‰é¾øÿÿ‰éløÿÿB +00runtime.morestack_noctxtÈ&type.[]interface {}î"runtime.makesliceÆ,type.map[string]string¦4runtime.mapaccess2_faststrîtype.stringšruntime.convT2E„ 2runtime.writebarrieriface’ Tgo.string."mux: missing route variable %q"† +fmt.Errorf¶ type.stringâ runtime.convT2Eæ 2runtime.writebarrieriface¾fmt.Sprintf°8regexp.(*Regexp).MatchStringÖ,type.map[string]string¶4runtime.mapaccess1_faststr¾8regexp.(*Regexp).MatchStringˆ,type.map[string]stringè4runtime.mapaccess1_faststr¾ð runtime.duffzero¼type.stringèruntime.convT2EÒ2runtime.writebarrierifaceàtype.stringŒruntime.convT2Eþ2runtime.writebarrierifaceŒngo.string."mux: variable %q doesn't match, expected %q"€fmt.Errorfš$runtime.panicindex‚ $runtime.panicindexÚ $runtime.panicindex`ÀR"".autotmp_0492"type.interface {}"".autotmp_0491"type.interface {}"".autotmp_0489&type.[]interface {}"".autotmp_0488type.string"".autotmp_0487type.*string"".autotmp_0486type.int"".autotmp_0485type.int"".autotmp_0484ß"type.interface {}"".autotmp_0482&type.[]interface 
{}"".autotmp_0480¿type.string"".autotmp_0479type.*string"".autotmp_0478¯type.int"".autotmp_0477type.int"".autotmp_0476type.error"".autotmp_0475Ÿtype.string"".autotmp_0474type.string"".autotmp_0473ÿtype.string"".autotmp_0472?(type.[2]interface {}"".autotmp_0470ßtype.string"".autotmp_0469type.string"".autotmp_0468type.[]string"".autotmp_0466type.string"".autotmp_0465type.string"".autotmp_0463¿type.string"".autotmp_0462Ÿ(type.[1]interface {}"".autotmp_0461type.string"".autotmp_0460Ÿtype.[]string"".autotmp_0459o&type.[]interface {}"".autotmp_0458Ÿtype.int "".~r0ÿtype.string"".vŸtype.string"".kÏtype.int +"".rvÿtype.string"".valueßtype.string"".v¿type.string"".k¿type.int"".urlValuesÿ&type.[]interface {} "".~r2@type.error "".~r1 type.string"".values,type.map[string]string"".r(type.*"".routeRegexp6%À›¿À¼ ¿À¿ÀPÐtÀUMnl Ão W=nÙm½'˜@ Lvܺvn®9 ÃÙ €ÌMt , +$Tgclocals·87979038f036a055d96e4dae0820fce3Tgclocals·016ee616535ff5ac97db8c28536faf6d¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ:"".(*routeRegexp).getUrlQueryÀ +° +eH‹ %HD$ H;AwèëåHìàH‹„$èHÇ„$øHÇ„$¶X€ûu HÇ„$øHÇ„$HÄàÃH(H$H‰ßH‰îH¥H¥HHl$H‰ïH‰ÞH¥H¥HÇD$ èH‹L$(H‹D$0H‹\$8Hƒø†ØH‹)H‰l$@H‹iH‰l$HH‹œ$ðH‹kH‰,$èH‹L$H¼$1ÀèHH‰$H‰L$Hœ$H‰\$èL‹D$HH‹œ$1íH9ë„6H‹œ$˜Hƒû„KH‹;H‹sH‹SH‹œ$Hƒû„'H‹ H‰L$PH‹CH‰|$xH‰|$`H‰´$€H‰t$hH‰”$ˆH‰T$pH‰D$XL9À… H‰ $H‰D$H‹l$@H‰l$L‰D$èH‹L$h¶\$ €ûttHƒù~nH‹\$PH‰$H‹t$XH‰t$H5Hl$H‰ïH¥H¥H‹\$`Hƒùv6Hl$ H‰ïH‰ÞH¥H¥èH‹\$0H‰œ$øH‹\$8H‰œ$HÄàÃè Hœ$H‰$èL‹D$HH‹œ$1íH9ë…ÊþÿÿHÇ„$øHÇ„$HÄàÉéÒþÿÿ‰é®þÿÿè  +*0runtime.morestack_noctxtþgo.string."="¸strings.SplitN¶(net/url.(*URL).QueryÞØ runtime.duffzeroì&type.net/url.Values¢&runtime.mapiterinitæ runtime.eqstringÄgo.string."="š*runtime.concatstring3è$runtime.panicindexŽ &runtime.mapiternext¤ +$runtime.panicindex@À"".autotmp_0506type.[]string"".autotmp_0504Ÿ:type.map.iter[string][]string"".autotmp_0501Ïtype.[]string"".valsÿtype.[]string "".keyŸtype.string"".templateKey¿type.string "".~r1 
type.string "".req,type.*net/http.Request"".r(type.*"".routeRegexp2"ÀH¿ÀÇ¿ÀO¿À 2øB  _Â@n) + &›?6¢Z:KTgclocals·f99f470b4e8bf0bbfec1c215fb234ac7Tgclocals·d0cd7946f7c85d974217a4cbfbe17824¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþD"".(*routeRegexp).matchQueryStringààeH‹ %H;awèëêHƒì0H‹\$8H‰$H‹\$@H‰\$èH‹L$H‹D$H‹\$8H‹kH‰,$H‰L$ H‰L$H‰D$(H‰D$è¶\$ˆ\$HHƒÄ0à + 0runtime.morestack_noctxt\:"".(*routeRegexp).getUrlQuery¼8regexp.(*Regexp).MatchString0`"".autotmp_0507type.string "".~r1 type.bool "".req,type.*net/http.Request"".r(type.*"".routeRegexp`U_p ’V +-CTgclocals·d3486bc7ce1948dc22d7ad1c0be2887aTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ"".braceIndices –eH‹ %HD$¨H;AwèëåHìØHÇ„$ðHÇ„$øHÇ„$HÇ„$HÇ„$HÇD$@HÇD$HHH‰$HÇD$HÇD$èL‹œ$àL‹Œ$èH‹D$@H‹|$L‹T$ H‹t$(H‰¼$ÀH‰¼$L‰”$ÈL‰”$˜H‰´$ÐH‰´$ 1ÉL9É}0L9ɃÎI ¶+@€ý{…ˆHÿÀHƒøuH‰L$HHÿÁL9É|ÐHƒø„0L‰\$pL‰L$xHœ$€HÇHÇCHœ$€Hƒû„öHÇÁHÇÂH‰œ$¨H‰Œ$°H‰”$¸HH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$¨H‰$H‰L$`H‰L$H‰D$hH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$¨H‰\$H‹œ$°H‰\$H‹œ$¸H‰\$ èH‹L$(H‹D$0HÇ„$ðHÇ„$øHÇ„$H‰Œ$H‰„$HÄØÉéÿÿÿH‰¼$ðL‰”$øH‰´$HÇ„$HÇ„$HÄØÃ@€ý}…|þÿÿHÿÈH‰D$@Hƒø…ßH‰ËH‰L$PHÿÃH‰\$XL‰ÒH‰óL)ÓHƒû}iHH‰$H‰¼$ÀH‰|$H‰”$ÈH‰T$H‰´$ÐH‰t$HÇD$ èL‹œ$àL‹Œ$èH‹L$PH‹D$@H‹|$(H‹T$0H‹t$8I‰ÒIƒÂH×H‹l$HH‰+HÿÂH×H‹l$XH‰+H‰¼$ÀL‰”$ÈH‰´$ÐH‰¼$L‰”$˜H‰´$ é‹ýÿÿHƒøýÿÿL‰\$pL‰L$xHœ$€HÇHÇCHœ$€Hƒû„öHÇÂHÇÁH‰œ$¨H‰”$°H‰Œ$¸HH‰$H\$pH‰\$èH‹L$H‹D$H‹œ$¨H‰$H‰L$`H‰L$H‰D$hH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$¨H‰\$H‹œ$°H‰\$H‹œ$¸H‰\$ èH‹L$(H‹D$0HÇ„$ðHÇ„$øHÇ„$H‰Œ$H‰„$HÄØÉéÿÿÿè " +*0runtime.morestack_noctxtætype.[]intœ"runtime.makeslice†type.string¬runtime.convT2EŠ2runtime.writebarrieriface˜Pgo.string."mux: unbalanced braces in %q"Œfmt.Errorfš type.[]intŒ "runtime.growsliceàtype.string†runtime.convT2Eä2runtime.writebarrierifaceòPgo.string."mux: unbalanced braces in 
%q"æfmt.ErrorfŠ$runtime.panicindexp°."".autotmp_0533"type.interface {}"".autotmp_0532*type.*[1]interface {}"".autotmp_0531&type.[]interface {}"".autotmp_0530ï"type.interface {}"".autotmp_0528_&type.[]interface {}"".autotmp_0523type.[]int"".autotmp_0522type.int"".autotmp_0520type.error"".autotmp_0519type.string"".autotmp_0518(type.[1]interface {}"".autotmp_0515Ïtype.string"".autotmp_0514¯(type.[1]interface {}"".autotmp_0512type.int"".autotmp_0511ÿtype.int"".autotmp_0510type.int"".autotmp_0509/type.[]int"".itype.int"".idxstype.[]int "".idxŸtype.int"".level¯type.int "".~r2Ptype.error "".~r1 type.[]int"".stype.string4"°­¯°>¯°­¯°Ð Vž^v +  +°8 +Ú +° *ˆp€ýpR Tgclocals·9680905063a74374258fdae79a25b518Tgclocals·36d420fa591ebaf09d3a180d960e2a08¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ"".varGroupName –eH‹ %H;awèëêHƒì@HÇD$PHÇD$XH‹\$HH‰$èH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥H‰L$0H‰L$H‰D$8H‰D$èH‹\$ H‰\$PH‹\$(H‰\$XHƒÄ@à + + 0runtime.morestack_noctxtlstrconv.ItoaŽgo.string."v"Ü*runtime.concatstring20€"".autotmp_0543type.string "".~r1type.string "".idxtype.int€p Î,d +5[Tgclocals·a73fd2a0c6f832642aa9216fd9c5e6beTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ>"".(*routeRegexpGroup).setMatchà8Ö8eH‹ %H„$øýÿÿH;AwèëâHìˆH‹¬$H‹]1íH9ë„ÉH‹œ$˜H‰$èH‹L$H‹D$H‹œ$H‹H‹kH‰,$H‰Œ$XH‰L$H‰„$`H‰D$èH‹T$H‹L$ H‹D$(H‰Œ$0H‰„$8HƒúH‰”$(„KH‹œ$H‹H‹[Hƒû„Q H‹S`H‹KhH‹kpH‰”$hH‰Œ$pH‰¬$x1ÿH‰¬$ðH‰Œ$èHƒù‚ + H‰”$àH‰ÐH‰ÊHÿÊH‰éHÿÉHƒùtHƒÀH‰Œ$€1öH‰”$xH‰T$xH‰„$pH‰„$°H‹l$xH9îœH‹œ$°Hƒû„› H‹ H‹kH‰t$pH‰t$`H‰Œ$XH‰Œ$H‰¬$`H‰¬$ Hƒý„*H‰|$@H‰<$èH‹|$@H‹t$pH‹Œ$ H‹T$H‰”$XH‹D$H‰„$`H9Á…çH‹¬$H‰,$H‰L$H‰T$H‰D$èH‹|$@H‹t$p¶\$ €û„¯HH‰$H‹œ$ H‹kH‰l$H‹œ$H‹+Hƒý„¬ H‹M0H‹E8H‹]@H‰œ$PH‰ËH‰Œ$@H‰ýH‰„$HH9ǃr HkíHëH‰\$H‹\$`HÿÃH‹¬$(L‹„$0L9Ã> HkÛHÝH‰l$èH‹t$pH‹|$@HÿÇH‹œ$°HƒÃH‰œ$°HÿÆH‹l$xH9îŒdþÿÿH‹¬$H‹]1íH9ë„ 
H‹œ$H‹[H‹kH‰,$H‹´$˜H‹~Hƒÿ„³ +Hw8H|$H¥H¥èH‹T$H‹L$ H‹D$(H‰Œ$H‰„$ HƒúH‰”$„žH‹œ$H‹[H‹[Hƒû„O +H‹S`H‹KhH‹kpH‰”$€H‰Œ$ˆH‰¬$1öH‰¬$ÀH‰Œ$¸Hƒù‚ +H‰”$°H‰ÐH‰ÊHÿÊH‰éHÿÉHƒùtHƒÀH‰Œ$P1ÿH‰”$HH‰”$ˆH‰„$@H‰„$°H‹¬$ˆH9ï H‹œ$°Hƒû„“ H‹ H‹kH‰|$pH‰|$PH‰Œ$XH‰Œ$8H‰¬$`H‰¬$@Hƒý„+H‰t$8H‰4$èH‹|$pH‹t$8H‹Œ$@H‹T$H‰”$XH‹D$H‰„$`H9Á…èH‹¬$8H‰,$H‰L$H‰T$H‰D$èH‹|$pH‹t$8¶\$ €û„°HH‰$H‹œ$ H‹kH‰l$H‹œ$H‹kHƒý„£H‹M0H‹E8H‹]@H‰œ$€H‰ËH‰Œ$pH‰õH‰„$xH9ƃiHkíHëH‰\$H‹\$PHÿÃH‹¬$L‹„$L9Ã5HkÛHÝH‰l$èH‹|$pH‹t$8HÿÆH‹œ$°HƒÃH‰œ$°HÿÇH‹¬$ˆH9ïŒ`þÿÿH‹¬$H‹m¶]€û„/H‹œ$˜H‹kHƒý„¸L‹E8L‰„$ØH‹M@HH‹;H‰¼$øH‹CH‰Œ$àH‰„$H9ÁŒkH‰ÊH)ÂH‰ÎH9Ñ‚`L‰ÁH)ÖHƒþt H‰ÓHËH‰ÙH‰Œ$XH‰´$`H9Æ…+H‰ $H‰t$H‰|$H‰D$è¶\$ €û„HÇÀˆD$7H‹œ$H‹kHƒý„ÜL‹EL‰„$èH‹MHH‹;H‰¼$H‹CH‰Œ$ðH‰„$H9ÁŒH‰ÊH)ÂH‰ÎH9Ñ‚„L‰ÁH)ÖHƒþt H‰ÓHËH‰ÙH‰Œ$XH‰´$`H9Æ…OH‰ $H‰t$H‰|$H‰D$è¶\$ €û„)HÇÀ¶\$78ÄšH‹œ$˜H‹kH‰,$èH‹L$H‹D$H‰Œ$XH‰ $H‰„$`H‰D$èH‹D$€|$7„/H‹H@HÿÉH‹P@H9Ê‚H‹P8H‰”$XH‰P8H‰Œ$`H‰H@H‰$èH‹L$H‹D$H‰Œ$XH‰Œ$ÈH‰„$`H‰„$ÐHÇD$h-HÇ„$¸HÇ„$ÀHH‰$èH‹D$H‰„$¨H‰$Hƒ<$„lH‹œ$ÈH‰\$H‹œ$ÐH‰\$èH‹„$¨H‹l$hH‰hH‰„$¨H‹1íH9è„ðH‹Œ$¨H‰„$HH‰Œ$PH‹œ$ H‰$Hƒ<$„µHƒ$H‰„$¸H‰D$H‰Œ$ÀH‰L$èH‹œ$Hƒû„xH‹SH‹CH‹k H‰¬$h1ÉH‰„$`H‰D$pH‰”$XH‰ÐH‹l$pH9é +H‰„$ H‹(H‰Œ$ˆH‰¬$˜H‰,$H‹œ$˜H‰\$èH‹L$H‹D$H‹œ$˜H‹kH‰,$H‰Œ$XH‰L$H‰„$`H‰D$èH‹T$H‹L$ H‹D$(H‰Œ$H‰„$HƒúH‰”$ø„JH‹œ$˜H‹[Hƒû„„H‹S`H‹KhH‹kpH‰”$˜H‰Œ$ H‰¬$¨1öH‰¬$ØH‰Œ$ÐHƒù‚=H‰”$ÈH‰ÐH‰ÊHÿÊH‰éHÿÉHƒùtHƒÀH‰Œ$P1ÿH‰”$HH‰”$€H‰„$@H‰„$°H‹¬$€H9ï˜H‹œ$°Hƒû„ÈH‹ H‹kH‰|$xH‰|$XH‰Œ$XH‰Œ$(H‰¬$`H‰¬$0Hƒý„#H‰t$HH‰4$èH‹|$xH‹t$HH‹Œ$0H‹T$H‰”$XH‹D$H‰„$`H9Á…àH‹¬$(H‰,$H‰L$H‰T$H‰D$èH‹|$xH‹t$H¶\$ €û„¨HH‰$H‹œ$ H‹kH‰l$H‹œ$˜Hƒû„ÝH‹K0H‹C8H‹k@H‰¬$€H‰ËH‰Œ$pH‰õH‰„$xH9ƃ£HkíHëH‰\$H‹\$XHÿÃH‹¬$øL‹„$L9ÃssHkÛHÝH‰l$èH‹|$xH‹t$HHÿÆH‹œ$°HƒÃH‰œ$°HÿÇH‹¬$€H9ïŒhþÿÿH‹„$ H‹Œ$ˆHƒÀHÿÁH‹l$pH9éŒöüÿÿHĈÃè è ‰éÿÿÿ‰é1þÿÿè ‰éuýÿÿ‰éüÿÿ‰%é?üÿÿHH‰$HH‰\$HH‰\$èH‹D$éÞûÿÿ‰%éˆûÿÿè H‰„$Hƒø„€H‹H8H‹x@H‰Œ$XH‰ $H‰¼$`H‰|$HH|$H‰ÞH¥H¥èH\$ Hl$H‰ïH‰ÞH¥H¥H‹œ$H‰$Hƒ<$tHƒ$8èH‹„$éxúÿÿ‰%ëà‰éyÿÿÿ1Àé×ùÿÿè ‰Eéùÿÿ1Àéûøÿÿè ‰Eé@øÿÿè è ‰EéU÷ÿÿ‰éföÿÿè ‰éªõÿÿ‰éFõÿÿè è ‰EéLôÿÿ‰é^óÿÿè ‰é¨òÿÿd +00runtime.morestack_noctxt’"".getHostŠFregexp.(*Regexp).FindStringSubmatch€"".varGroupNameª runtime.eqstringè,type.map[string]stringž 
$runtime.mapassign1¨ Fregexp.(*Regexp).FindStringSubmatch¬"".varGroupNameÖ runtime.eqstring”,type.map[string]stringÌ$runtime.mapassign1Âgo.string."/" runtime.eqstringÎgo.string."/"Î runtime.eqstring¼*net/url.(*URL).StringŒnet/url.Parsež *net/url.(*URL).StringÂ!:type.net/http.redirectHandlerÔ!"runtime.newobjectÊ"4runtime.writebarrierstringŠ#dgo.itab.*net/http.redirectHandler.net/http.HandlerÆ$2runtime.writebarrierifaceÄ&:"".(*routeRegexp).getUrlQuery¶'Fregexp.(*Regexp).FindStringSubmatch²,"".varGroupNameÜ- runtime.eqstringš.,type.map[string]stringÂ0$runtime.mapassign12$runtime.panicindexž2$runtime.panicindexÈ2$runtime.panicsliceŽ3±¨(Æ B 0 av:± ( %  J„   ^H÷U¿ÂU¶É ;~ ÷Uš` M +Z ‡Tgclocals·4fc7bdfae1004e17f25f57b4d9db22c7Tgclocals·d36b9b8893a5ece0bc46499614098043¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ"".getHostàÜeH‹ %H;awèëêHƒì8H‹L$@HÇD$HHÇD$PH‹iH‹]Hƒû…Ÿ1À<t&H‹iHƒýtH‹](H‰\$HH‹]0H‰\$PHƒÄ8ÉEëäH‰ÎHƒùtfH‹IxH‹¾€H‰L$(H‰ $H‰|$0H‰|$H5H|$H¥H¥èH‹T$(H‹L$0H‹D$ HƒøÿtH9ÁrH‰ÁH‰T$HH‰L$PHƒÄ8Ãè ‰ë–HÇÀéWÿÿÿ + + 0runtime.morestack_noctxt²go.string.":"Îstrings.Index°$runtime.panicslice0p"".hosttype.string "".~r1type.string"".r,type.*net/http.RequestpOoplopð,æ1&=¦1Tgclocals·d7e8a62d22b1cde6d92b17a55c33fe8fTgclocals·8d600a433c6aaa81a4fe446d95c5546b¬/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.goþ""".(*Route).Matchà Î eH‹ %H;awèëêHƒìpH‹´$ˆH‹l$x¶]A€û…&H‹\$xHƒ{X…H‹\$xHƒû„H‹S H‹C(H‹k0H‰l$h1ÉH‰D$`H‰D$ H‰T$XH‰ÐH‹l$ H9é‰H‰D$0Hƒø„·H‹H‹hH‰L$(H‰T$HH‰l$PH‹œ$€H‰\$H‰t$H‰l$@H‰,$H‰T$8H‹Z ÿÓH‹´$ˆ¶\$€ûu Æ„$HƒÄpÃH‹D$0H‹L$(HƒÀHÿÁH‹l$ H9éŒwÿÿÿH‹1íH9ëu&H‰4$Hƒ<$„H‹\$xH‰\$èH‹´$ˆHƒ~u@H‰4$Hƒ<$„ßHƒ$H‹|$xHƒÿ„ÄHoH|$H‰îH¥H¥èH‹´$ˆH‹^1íH9ëuHHH‰$HÇD$èH‹D$H‹œ$ˆH‰$Hƒ<$tbHƒ$H‰D$èH‹´$ˆH‹l$xH‹]81íH9ët.H‹\$xH‹k8H‰,$H‹œ$€H‰\$H‰t$H‹\$xH‰\$èÆ„$HƒÄpÉ%땉é5ÿÿÿ‰%éÿÿÿ‰%éÜþÿÿ‰éBþÿÿ‰éùýÿÿÆ„$HƒÄpà + 0runtime.morestack_noctxtž 
+æ.runtime.writebarrierptrô2runtime.writebarrieriface¨,type.map[string]stringÌruntime.makemapš.runtime.writebarrierptr¦>"".(*routeRegexpGroup).setMatch@à"".autotmp_0644Otype."".matcher"".autotmp_0643 type.*"".matcher"".autotmp_0642Ÿtype.int"".autotmp_0641type.int"".autotmp_0638/"type.[]"".matcher"".motype."".matcher "".~r20type.bool"".match &type.*"".RouteMatch "".req,type.*net/http.Request"".rtype.*"".Route.àÕßà³ßàBß +ðRR""g8  +&@ H.  Ïc¾Tgclocals·d69c4140875de858f5dc9e2e8acb0bc0Tgclocals·4398bb51467914f29637b614067b995fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ("".(*Route).GetError`DH‹\$HƒûtH‹kXH‰l$H‹k`H‰l$Éëé0 "".~r0type.error"".rtype.*"".Route00”0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ*"".(*Route).BuildOnly@,H‹D$HÇÅ@ˆhAH‰D$à  "".~r0type.*"".Route"".rtype.*"".Route  œ Tgclocals·06cab038d51064a089bda21fa03e00f7Tgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ&"".(*Route).HandleràÎeH‹ %H;awèëêHƒìH‹D$ HƒxXu.H‰$Hƒ<$t-Hƒ$H‹\$(H‰\$H‹\$0H‰\$èH‹D$ H‰D$8HƒÄÉ%ëÊ + 0runtime.morestack_noctxt–2runtime.writebarrieriface@0 "".~r10type.*"".Route"".handler*type.net/http.Handler"".rtype.*"".Route0C/0p¬. 
+ +J&Tgclocals·433981679ca6b8ba029d40d9f4c7048cTgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ."".(*Route).HandlerFunc ”eH‹ %H;awèëêHƒìPH‹t$XH‰t$ H‹\$`H‰\$(H‹1íH9ètWH‹T$(H‰D$@H‰T$HHƒ~Xu.H‰4$Hƒ<$t-Hƒ$H‰D$0H‰D$H‰T$8H‰T$èH‹t$ H‰t$hHƒÄPÉ%ëÊHH‰$HH‰\$HH‰\$èH‹t$ H‹D$érÿÿÿ + 0runtime.morestack_noctxtbZgo.itab.net/http.HandlerFunc.net/http.Handlerî2runtime.writebarrieriface¬2type.net/http.HandlerFuncÂ*type.net/http.HandlerÚZgo.itab.net/http.HandlerFunc.net/http.Handlerî runtime.typ2Itab0 "".autotmp_0646*type.net/http.Handler"".autotmp_0645O2type.net/http.HandlerFunc"".handler?*type.net/http.Handler"".r_type.*"".Route "".~r1 type.*"".Route"".fjtype.func(net/http.ResponseWriter, *net/http.Request)"".rtype.*"".Route oŸ Fм¶v@Tgclocals·3cd76c4f8d01c613585e17871258aa07Tgclocals·deb2efaee408e42e52d8a1e8a4a979e1ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ,"".(*Route).GetHandler`DH‹\$HƒûtH‹kH‰l$H‹kH‰l$Éëé0 "".~r0*type.net/http.Handler"".rtype.*"".Route00È0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ "".(*Route).NameÀ +² +eH‹ %HD$ØH;AwèëåHì¨H‹¬$°H‹]PHƒû„zH‹œ$¸H‰\$`H‹œ$ÀH‰\$hH¼$ˆ1ÀèHœ$ˆHƒû„HÇÂHÇÁH‰\$pH‰T$xH‰Œ$€HH‰$H‰l$Hƒ|$„×HƒD$HèH‹L$H‹D$H‹\$pH‰$H‰L$@H‰L$H‰D$HH‰D$èHH‰$H\$`H‰\$èH‹L$H‹D$H‹\$pHƒÃH‰$H‰L$@H‰L$H‰D$HH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹\$pH‰\$H‹\$xH‰\$H‹œ$€H‰\$ èH‹L$(H‹D$0H‹œ$°H‰$Hƒ<$„îHƒ$XH‰L$PH‰L$H‰D$XH‰D$èH‹¬$°Hƒ}X…¡H‰,$Hƒ<$„¢Hƒ$HH‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹œ$°H‰$èH‹D$H‹œ$¸H‰\$`H‹œ$ÀH‰\$hH‹œ$°H‰\$8HH‰$H‰D$H\$`H‰\$H\$8H‰\$èH‹¬$°H‰¬$ÈHĨÉ%éRÿÿÿ‰%éÿÿÿ‰%éþÿÿ‰éÚýÿÿ +*0runtime.morestack_noctxtºð 
runtime.duffzero¬type.stringìruntime.convT2EÄ2runtime.writebarrierifaceÒtype.stringøruntime.convT2EØ2runtime.writebarrierifaceæpgo.string."mux: route already has name %q, can't set %q"Îfmt.ErrorfÌ2runtime.writebarrierifaceØ4runtime.writebarrierstringú4"".(*Route).getNamedRoutesà2type.map[string]*"".Route¤ $runtime.mapassign1@Ð"".autotmp_0658"type.interface {}"".autotmp_0657Ï"type.interface {}"".autotmp_0655o&type.[]interface {}"".autotmp_0654ßtype.*"".Route"".autotmp_0653type.string"".autotmp_0651¯type.error"".autotmp_0650type.string"".autotmp_0649?(type.[2]interface {} "".~r10type.*"".Route"".nametype.string"".rtype.*"".Route"ÐËÏÐ2 *Ö*ú 3n &µ±…fOTgclocals·4205cab2470caaf976442750814b93e4Tgclocals·609fbbd38973bf0432058b6e5d6645e9ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ&"".(*Route).GetName`DH‹\$HƒûtH‹kHH‰l$H‹kPH‰l$Éëé0 "".~r0type.string"".rtype.*"".Route00ò0Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ,"".(*Route).addMatcher€üeH‹ %H;awèëêHƒìpH‹D$xHƒxX…þH‹P H‹H(H‹X0H‰T$XH‰L$`H‰\$hH‰ØH)ËHƒû}FHH‰$H‰T$@H‰T$H‰L$HH‰L$H‰D$PH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$HH‰D$PH‰ÓH‰T$@H‰ÍHkíHëH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹T$@H‹L$HH‹D$PH‹\$xH‰$Hƒ<$t:Hƒ$ H‰T$XH‰T$H‰L$`H‰L$H‰D$hH‰D$èH‹D$xH‰„$HƒÄpÉ%ë½ + 0runtime.morestack_noctxt¨"type.[]"".matcherˆ"runtime.growslice°2runtime.writebarrieriface¾2runtime.writebarrierslice@à "".autotmp_0664_"type.[]"".matcher"".autotmp_0663/"type.[]"".matcher"".autotmp_0662"type.[]"".matcher "".~r10type.*"".Route"".mtype."".matcher"".rtype.*"".Routeàšßà ÀŒ þ  
ƒTG"Tgclocals·a69e79957b5150531998200513ab99eeTgclocals·23c4785fa8abd7e258acfe91c9f325f3ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ8"".(*Route).addRegexpMatcher %Ž%eH‹ %H„$(ÿÿÿH;AwèëâHìXH‹„$`HÇ„$€HÇ„$ˆHƒxXt H‹hXH‰¬$€H‹h`H‰¬$ˆHÄXÃH‰$èH‹D$H‹œ$`H‰$Hƒ<$„ªHƒ$8H‰D$èH‹´$`H‹Œ$hH‹„$p€¼$x…Í€¼$z…¿Hƒø„;Hƒø†L¶€û/…%H‹n8H‹]1íH9ë„ŒH‹v8H‹~Hƒÿ„ùH7H<$H¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹L$ H‹D$(H‰Œ$°H‰ $H‰„$¸H‰D$H‹œ$hH‰\$H‹œ$pH‰\$èH‹´$`H‹L$ H‹D$(H‰Œ$hH‰ $H‰„$pH‰D$¶œ$xˆ\$¶œ$yˆ\$¶œ$zˆ\$¶n@@ˆl$èL‹„$`H‹t$H‰t$PH‹D$ H‹L$(H‰Œ$ˆHƒøH‰„$€tH‰„$€H‰Œ$ˆHÄXÃI‹h8Hƒý„ÔH‹UH‹EH‹] H‰œ$P1ÉH‰„$HH‰D$@H‰”$@H‰ÐH‹l$@H9é³H‰D$hH‹H‰L$HHƒþ„zHn0H$H‰ßH‰îH¥H¥H¥Hƒø„UHh0H\$H‰ßH‰îH¥H¥H¥èL‹„$`H‹t$PH‹L$0H‹D$8H‰„$ˆHƒùH‰Œ$€tH‰Œ$€H‰„$ˆHÄXÃH‹D$hH‹L$HHƒÀHÿÁH‹l$@H9éŒMÿÿÿ€¼$x„®I‹h8H‹]1íH9넉Hƒþ„ŠHn0H$H‰ßH‰îH¥H¥H¥I‹p8H‹~Hƒÿ„]Hw0H|$H¥H¥H¥èL‹„$`H‹t$PH‹D$0H‹L$8H‰Œ$ˆHƒøH‰„$€tH‰„$€H‰Œ$ˆHÄXÃI‹h8H‰,$Hƒ<$„åH‰t$èH‹”$`H‰T$XH‹\$PH‰\$`H‹1íH9è„{H‹L$`H‰„$H‰D$pH‰Œ$˜H‰L$xHƒzX…(H‰ÓH‹R H‹K(H‹[0H‰”$øH‰Œ$H‰œ$H‰ØH)ËHƒû}OHH‰$H‰”$àH‰T$H‰Œ$èH‰L$H‰„$ðH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰¬$èH‰„$ðH‰ÓH‰”$àH‰ÍHkíHëH‰$H‹\$pH‰\$H‹\$xH‰\$èH‹”$àH‹Œ$èH‹„$ðH‹\$XH‰$Hƒ<$tVHƒ$ H‰”$øH‰T$H‰Œ$H‰L$H‰„$H‰D$èH‹T$XHÇ„$€HÇ„$ˆHÄXÉ%ë¡HH‰$HH‰\$HH‰\$èH‹T$XH‹D$éNþÿÿ‰%éþÿÿ‰éœýÿÿ‰éoýÿÿI‹h8H‹]1íH9너Hƒþ„÷Hn0H$H‰ßH‰îH¥H¥H¥I‹p8H‹>Hƒÿ„ËHw0H|$H¥H¥H¥èL‹„$`H‹t$PH‹D$0H‹L$8H‰Œ$ˆHƒøH‰„$€tH‰„$€H‰Œ$ˆHÄXÀ¼$z„8I‹h8Hƒý„"H‹UH‹MH‹] H‰”$@H‰Œ$HH‰œ$PH‰ØH)ËHƒû}THH‰$H‰”$H‰T$H‰Œ$H‰L$H‰„$ H‰D$HÇD$ èH‹t$PH‹T$(H‹L$0H‹D$8H‰ÏHÿÇH‰¼$H‰„$ H‰”$HÊH‰$H‰t$èH‹”$H‹Œ$H‹„$ H‹œ$`H‹k8H‰,$Hƒ<$t6Hƒ$H‰”$@H‰T$H‰Œ$HH‰L$H‰„$PH‰D$èé;üÿÿ‰%ëÁ‰EéÖþÿÿI‹h8H‰,$Hƒ<$tHƒ$H‰t$èéüÿÿ‰%ëã‰é.þÿÿ‰éþÿÿ‰é¤úÿÿ‰éúÿÿ‰Eé$úÿÿ‰éùÿÿH‰Œ$ÀH‰„$ÈHœ$ÐHÇHÇCHœ$ÐHƒû„ÛHÇÂHÇÁH‰œ$(H‰”$0H‰Œ$8HH‰$Hœ$ÀH‰\$èH‹L$H‹D$H‹œ$(H‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$(H‰\$H‹œ$0H‰\$H‹œ$8H‰\$ èH‹L$(H‹D$0H‰Œ$€H‰„$ˆHÄXÉéÿÿÿè ‰%éJ÷ÿÿ@ +00runtime.morestack_noctxtâ4"".(*Route).getRegexpGroup¸.runtime.writebarrierptrÎgo.string."/"ö"strings.TrimRightú*runtime.concatstring2´""".newRouteRegexpú 
+"".uniqueVarsŒ"".uniqueVarsÐ.runtime.writebarrierptrŒDgo.itab.*"".routeRegexp."".matcherê"type.[]"".matcherÜ"runtime.growsliceŠ2runtime.writebarrieriface¼2runtime.writebarrierslice¦(type.*"".routeRegexp¼type."".matcherÔDgo.itab.*"".routeRegexp."".matcherè runtime.typ2Itabæ"".uniqueVars–,type.[]*"".routeRegexpˆ"runtime.growslice.runtime.writebarrierptrÐ2runtime.writebarrierslice¸.runtime.writebarrierptrö!type.string¢"runtime.convT2EŒ#2runtime.writebarrierifaceš#jgo.string."mux: path must start with a slash, got %q"Ž$fmt.Errorfê$$runtime.panicindex`°L"".autotmp_0705type.uint64"".autotmp_0704type.uint64"".autotmp_0703type.int"".autotmp_0702type.int"".autotmp_0701ï"type.[]"".matcher"".autotmp_0700¿"type.[]"".matcher"".autotmp_0698type."".matcher"".autotmp_0695type.int"".autotmp_0694type.int"".autotmp_0693,type.[]*"".routeRegexp"".autotmp_0692,type.[]*"".routeRegexp"".autotmp_0691ï(type.*"".routeRegexp"".autotmp_0690ß*type.**"".routeRegexp"".autotmp_0689type.int"".autotmp_0688type.int"".autotmp_0687ï"type.interface {}"".autotmp_0685_&type.[]interface {}"".autotmp_0684"type.[]"".matcher"".autotmp_0683(type.*"".routeRegexp"".autotmp_0682,type.[]*"".routeRegexp"".autotmp_0681type.error"".autotmp_0680type.error"".autotmp_0679type.error"".autotmp_0678/,type.[]*"".routeRegexp"".autotmp_0677Ïtype.string"".autotmp_0675¯type.string"".autotmp_0674(type.[1]interface {}"".autotmp_0673Ÿtype.int"".mÏtype."".matcher"".rÿtype.*"".Route "".err¯type.error +"".rr(type.*"".routeRegexp "".~r4@type.error"".matchQuery4type.bool"".matchPrefix2type.bool"".matchHost0type.bool "".tpltype.string"".rtype.*"".Routel%°F¯°»¯°Ý¯°È¯°·¯°ô¯°°¯°#дœE L ŒoWo +qû @  p¢ +   ›DpÌɨWYV•`év.Tgclocals·a579fb2cc990573d92ac647761c8f48eTgclocals·49f894b96db65eb6fa4537f9009e3618ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ,"".headerMatcher.Match ’eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‹k8H‰l$ÆD$è¶\$ˆ\$@HƒÄ à + 
0runtime.morestack_noctxtn*"".matchMapWithString@@ "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request"".m*type."".headerMatcher@.?P €6 +6Tgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ&"".(*Route).Headers  +š +eH‹ %HD$ÈH;AwèëåHì¸H‹„$ÀHƒxX…HH‹œ$ÈH‰$H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹\$H‰\$HH‹L$ H‹D$(H‹œ$ÀH‰$Hƒ<$„æHƒ$XH‰L$xH‰L$H‰„$€H‰D$èH‹”$ÀH‰T$@H‹\$HH‰\$PH‹1íH9è„eH‹L$PH‰D$hH‰D$XH‰L$pH‰L$`HƒzX…(H‰ÓH‹R H‹K(H‹[0H‰”$ H‰Œ$¨H‰œ$°H‰ØH)ËHƒû}OHH‰$H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$H‰„$˜H‰ÓH‰”$ˆH‰ÍHkíHëH‰$H‹\$XH‰\$H‹\$`H‰\$èH‹”$ˆH‹Œ$H‹„$˜H‹\$@H‰$Hƒ<$tFHƒ$ H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$èH‹T$@H‰”$àHĸÉ%ë±HH‰$HH‰\$HH‰\$èH‹T$@H‹D$édþÿÿ‰%éþÿÿH‰„$àHĸà +*0runtime.morestack_noctxt¸."".mapFromPairsToStringÐ2runtime.writebarrierifaceŒFgo.itab."".headerMatcher."".matcherÞ"type.[]"".matcherÐ"runtime.growsliceþ2runtime.writebarrieriface°2runtime.writebarriersliceú*type."".headerMatcher type."".matcher¨ Fgo.itab."".headerMatcher."".matcher¼  runtime.typ2ItabPð"".autotmp_0723_"type.[]"".matcher"".autotmp_0722/"type.[]"".matcher"".autotmp_0720Ÿtype."".matcher"".autotmp_0719type.error"".autotmp_0718"type.[]"".matcher"".autotmp_0717Ï*type."".headerMatcher"".m¿type."".matcher"".rïtype.*"".Route"".headersß,type.map[string]string "".~r1@type.*"".Route"".pairstype.[]string"".rtype.*"".Route "ðŽïð[ïª* wÅ  [LÀWYF3Tgclocals·61dac2719f307a892a4a15123f2e6a2dTgclocals·d0bf09074369fc3c2a2033c0e9216dd2ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ6"".headerRegexMatcher.Match ’eH‹ %H;awèëêHƒì H‹\$(H‰$H‹\$0H‹k8H‰l$ÆD$è¶\$ˆ\$@HƒÄ à + 0runtime.morestack_noctxtn("".matchMapWithRegex@@ "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request"".m4type."".headerRegexMatcher@.?P Â6 
+6Tgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ2"".(*Route).HeadersRegexp  +š +eH‹ %HD$ÈH;AwèëåHì¸H‹„$ÀHƒxX…HH‹œ$ÈH‰$H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹\$H‰\$HH‹L$ H‹D$(H‹œ$ÀH‰$Hƒ<$„æHƒ$XH‰L$xH‰L$H‰„$€H‰D$èH‹”$ÀH‰T$@H‹\$HH‰\$PH‹1íH9è„eH‹L$PH‰D$hH‰D$XH‰L$pH‰L$`HƒzX…(H‰ÓH‹R H‹K(H‹[0H‰”$ H‰Œ$¨H‰œ$°H‰ØH)ËHƒû}OHH‰$H‰”$ˆH‰T$H‰Œ$H‰L$H‰„$˜H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$H‰„$˜H‰ÓH‰”$ˆH‰ÍHkíHëH‰$H‹\$XH‰\$H‹\$`H‰\$èH‹”$ˆH‹Œ$H‹„$˜H‹\$@H‰$Hƒ<$tFHƒ$ H‰”$ H‰T$H‰Œ$¨H‰L$H‰„$°H‰D$èH‹T$@H‰”$àHĸÉ%ë±HH‰$HH‰\$HH‰\$èH‹T$@H‹D$édþÿÿ‰%éþÿÿH‰„$àHĸà +*0runtime.morestack_noctxt¸,"".mapFromPairsToRegexÐ2runtime.writebarrierifaceŒPgo.itab."".headerRegexMatcher."".matcherÞ"type.[]"".matcherÐ"runtime.growsliceþ2runtime.writebarrieriface°2runtime.writebarriersliceú4type."".headerRegexMatcher type."".matcher¨ Pgo.itab."".headerRegexMatcher."".matcher¼  runtime.typ2ItabPð"".autotmp_0739_"type.[]"".matcher"".autotmp_0738/"type.[]"".matcher"".autotmp_0736Ÿtype."".matcher"".autotmp_0735type.error"".autotmp_0734"type.[]"".matcher"".autotmp_0733Ï4type."".headerRegexMatcher"".m¿type."".matcher"".rïtype.*"".Route"".headersßèB,"G $C $q-ï ÂÈa{2ˆTgclocals·59dbf976b94cece68fb6f0f44435318fTgclocals·92e9b440ac71050dc24736a398ce1eacª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ&"".(*Route).URLPath€ ê eH‹ %HD$øH;AwèëåHìˆH‹„$HÇ„$¸HÇ„$ÀHƒxXt,HÇ„$°H‹hXH‰¬$¸H‹h`H‰¬$ÀHĈÃH‹X81íH9넉H‹h8H‹]1íH9ë„vH‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹œ$¨H‰\$èH‹L$ H‹D$(H‹T$0H‰”$€HƒøH‰D$xt$HÇ„$°H‰„$¸H‰”$ÀHĈÃH‹œ$H‹[8H‹kH‰,$H‰L$èH‹\$H‰\$hH‹\$H‰\$pH‹D$ H‹L$(H‰Œ$€HƒøH‰D$xt$HÇ„$°H‰„$¸H‰Œ$ÀHĈÃHH‰$èH‹|$H‰ùHƒÿtk1ÀèH‰L$8H‰ 
$Hƒ<$tKHƒ$8H‹\$hH‰\$H‹\$pH‰\$èH‹\$8H‰œ$°HÇ„$¸HÇ„$ÀHĈÉ%묉ë‘HH‹+H‰l$XH‹kH‰l$`HÇD$HHÇD$PHH‰$èH‹D$H‰D$@H‰$Hƒ<$„‰H‹\$XH‰\$H‹\$`H‰\$èH‹\$@H‰\$@H‹1íH9èt)H‹L$@HÇ„$°H‰„$¸H‰Œ$ÀHĈÃHH‰$HH‰\$HH‰\$èH‹D$먉%ékÿÿÿ" +*0runtime.morestack_noctxt†."".(*Route).prepareVarsÎ*"".(*routeRegexp).url† type.net/url.URL˜"runtime.newobjectÂÌ runtime.duffzerož4runtime.writebarrierstring Tgo.string."mux: route doesn't have a path"ô.type.errors.errorString† "runtime.newobjectê 4runtime.writebarrierstringŒ +Bgo.itab.*errors.errorString.errorú +0type.*errors.errorString type.error¨ Bgo.itab.*errors.errorString.error¼  runtime.typ2Itabp"".autotmp_0852Ÿ"type.*net/url.URL"".autotmp_08490type.*errors.errorString"".autotmp_08480type.*errors.errorString "".~r0type.errorerrors.text·2_type.string"".path?type.string "".errtype.error "".~r2Ptype.error "".~r1@"type.*net/url.URL"".pairstype.[]string"".rtype.*"".RouteL"R—r¹F€>˜B,"G $D $S- ó ÂÉCt2ŒTgclocals·59dbf976b94cece68fb6f0f44435318fTgclocals·92e9b440ac71050dc24736a398ce1eacª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ."".(*Route).prepareVars€òeH‹ %H;awèëêHƒì@HÇD$pHÇD$xH‹\$PH‰$H‹\$XH‰\$H‹\$`H‰\$èH‹L$H‹D$ H‹T$(H‰T$8HƒøH‰D$0tHÇD$hH‰D$pH‰T$xHƒÄ@ÃH‹\$HH‰$H‰L$èH‹\$H‰\$hHÇD$pHÇD$xHƒÄ@à + 0runtime.morestack_noctxt”."".mapFromPairsToString¨*"".(*Route).buildVarsp€ + "".errtype.error "".~r2Ptype.error "".~r1@,type.map[string]string"".pairstype.[]string"".rtype.*"".Route€j€3ÀÄ,6 ; +IwTgclocals·9877a4ef732a0f966b889793f9b99b87Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ*"".(*Route).buildVars€€eH‹ %H;awèëêHƒì(H‹L$0H‹D$8Hƒ9t*H‹H‹iH‰D$H‰l$ H‰,$H‰T$H‹Z ÿÓH‹L$0H‹D$H‹Yh1íH9ëtH‰D$8H‰$H‹QhH‹ÿÓH‹D$H‰D$@HƒÄ(à + 0runtime.morestack_noctxt +Þ +0P"".autotmp_0856,type.map[string]string "".~r1 
,type.map[string]string"".m,type.map[string]string"".rtype.*"".RoutePeO€Ô$*  + +H8Tgclocals·d3486bc7ce1948dc22d7ad1c0be2887aTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ4"".(*Route).getNamedRoutesààeH‹ %H;awèëêHƒì@H‹\$HHƒ;…ÀHH‰$HÇD$èH‹\$H‰\$(HH‰$èH‹L$H‰ÏHƒù„û1ÀèH‰L$ H‰ $Hƒ<$„ÔHƒ$8H‹\$(H‰\$èH‹D$ 1í@ˆhAH‰D$ H‹1íH9ètpH‹\$HH‰$Hƒ<$tWH‹L$ H‰D$0H‰D$H‰L$8H‰L$èH‹\$HHƒût*H‹H‹kH‰l$8H‰,$H‰D$0H‹X(ÿÓH‹\$H‰\$PHƒÄ@ÉëÒ‰%ë HH‰$HH‰\$HH‰\$èH‹D$é^ÿÿÿ‰%é ÿÿÿ‰éþþÿÿ + 0runtime.morestack_noctxtX2type.map[string]*"".Route|runtime.makemapžtype."".Router°"runtime.newobjectâÜ runtime.duffzero².runtime.writebarrierptràBgo.itab.*"".Router."".parentRouteÊ2runtime.writebarrierifaceš +Ütype.*"".Routerò&type."".parentRouteŠBgo.itab.*"".Router."".parentRoutež runtime.typ2Itab €"".autotmp_0862?type.*"".Router"".autotmp_0861type.*"".Router"".autotmp_08602type.map[string]*"".Route"".autotmp_0859type.*"".Router"".autotmp_0858/2type.map[string]*"".Route "".~r02type.map[string]*"".Route"".rtype.*"".Route€ƒ€Rð€ À9N=ALj"Tgclocals·31b90725c9a885e731df361f51db8f0dTgclocals·db0987207386230beda65332b07cbe03ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ4"".(*Route).getRegexpGroupÀ º eH‹ %H;awèëêHƒìPH‹l$XH‹]81íH9ë…FH‹\$XHƒ;…ÈHH‰$HÇD$èH‹\$H‰\$8HH‰$èH‹L$H‰ÏHƒù„T1ÀèH‰L$(H‰ $Hƒ<$„-Hƒ$8H‹\$8H‰\$èH‹D$(1í@ˆhAH‰D$(H‹1íH9è„ÅH‹\$XH‰$Hƒ<$„¥H‹L$(H‰D$@H‰D$H‰L$HH‰L$èH‹\$XHƒû„qH‹H‹kH‰l$HH‰,$H‰D$@H‹X0ÿÓH‹D$H‰D$ 1íH9èuPHH‰$èH‹D$H‹\$XH‰$Hƒ<$t"Hƒ$8H‰D$èH‹\$XH‹k8H‰l$`HƒÄPÉ%ëÕHH‰$èH‹D$H‰D$0H‰$Hƒ<$„ÀH‹\$ H‹+H‰l$èH‹\$0H‰$Hƒ<$„ŽHƒ$H‹\$ H‹kH‰l$èH‹\$0H‰$Hƒ<$t]Hƒ$H‹|$ HƒÿtIHoH|$H‰îH¥H¥H¥èH‹\$XH‰$Hƒ<$tHƒ$8H‹\$0H‰\$èéÿÿÿ‰%ëÞ‰볉%뚉%éfÿÿÿ‰%é4ÿÿÿ‰éˆþÿÿ‰%éOþÿÿHH‰$HH‰\$HH‰\$èH‹D$é þÿÿ‰%éÇýÿÿ‰é¥ýÿÿ0 + 
0runtime.morestack_noctxt€2type.map[string]*"".Route¤runtime.makemapÆtype."".RouterØ"runtime.newobjectŠÜ runtime.duffzeroÚ.runtime.writebarrierptrˆBgo.itab.*"".Router."".parentRoute‚2runtime.writebarrierifaceÚ +†0type."".routeRegexpGroup˜"runtime.newobjectà.runtime.writebarrierptr¦0type."".routeRegexpGroup¸"runtime.newobjectŽ.runtime.writebarrierptræ.runtime.writebarrierptrÔ2runtime.writebarriersliceœ .runtime.writebarrierptr¶ +type.*"".RouterÌ +&type."".parentRouteä +Bgo.itab.*"".Router."".parentRouteø + runtime.typ2Itab  "".autotmp_08742type.*"".routeRegexpGroup"".autotmp_0872Otype.*"".Router"".autotmp_0871type.*"".Router"".autotmp_08702type.*"".routeRegexpGroup"".autotmp_0869?2type.*"".routeRegexpGroup"".autotmp_0868type.*"".Router"".autotmp_0867/2type.map[string]*"".Route"".regexp_2type.*"".routeRegexpGroup "".~r02type.*"".routeRegexpGroup"".rtype.*"".Route ìŸ ÙàH’ È/ 4 ž$ %T,QAT›+c$ n%Tgclocals·0d6246443c3fddb7ffb759a83afd407dTgclocals·3ed9fdb07b75789a32dd7844090b13c9ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.goþ"".init ŒeH‹ %H;awèëêHƒì0¶€ût¶€ûuHƒÄ0Ãè ÆèèèèèèèèèHH,$H‰ïH‰ÞH¥H¥èH‹L$H‹D$HH‰$H‰L$ H‰L$H‰D$(H‰D$èÆHƒÄ0Ã( + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·Œstrings.init–strconv.init net/url.initªbytes.init´”github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.init¾regexp.initÈpath.initÒnet/http.initÜfmt.initê8go.string."skip this router"errors.New²"".SkipRouterì2runtime.writebarrierifaceø"".initdone·`"".autotmp_0878type.error`_`Ž_ rH¸ r¿HÀ 
7™Tgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ª/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.goþ0"".parentRoute.buildVarsÀºeH‹ %H;awèëêHƒìH‹Y H…Ût H|$ H9;uH‰#H‹\$0H‰\$H‹\$(H‰$H‹\$ H‹[ ÿÓH‹\$H‰\$8HƒÄà + 0runtime.morestack_noctxt˜ +@0 "".~r10,type.map[string]string""..anon0 ,type.map[string]string""..this&type."".parentRoute0B/`` +LTgclocals·eeb28990c0dc813022336c3780186218Tgclocals·3280bececceccd33cb74587feedb1f9fþ:"".parentRoute.getNamedRoutesÀ¦eH‹ %H;awèëêHƒìH‹Y H…Ût H|$H9;uH‰#H‹\$ H‰$H‹\$H‹[(ÿÓH‹\$H‰\$(HƒÄà + 0runtime.morestack_noctxt„ +0  "".~r0 2type.map[string]*"".Route""..this&type."".parentRoute 8`` +BTgclocals·8cb639c12a4a13c6ace27031b0f83707Tgclocals·3280bececceccd33cb74587feedb1f9fþ:"".parentRoute.getRegexpGroupÀ¦eH‹ %H;awèëêHƒìH‹Y H…Ût H|$H9;uH‰#H‹\$ H‰$H‹\$H‹[0ÿÓH‹\$H‰\$(HƒÄà + 0runtime.morestack_noctxt„ +0  "".~r0 2type.*"".routeRegexpGroup""..this&type."".parentRoute 8`` +BTgclocals·8cb639c12a4a13c6ace27031b0f83707Tgclocals·3280bececceccd33cb74587feedb1f9fþ(type..hash.[8]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_0884type.int"".autotmp_0883type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[8]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ$type..eq.[8]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût 
H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_0888?type.string"".autotmp_0887type.string"".autotmp_0886_type.int"".autotmp_0885Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[8]string"".ptype.*[8]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ "".matcher.MatchàÌeH‹ %H;awèëêHƒì H‹Y H…Ût H|$(H9;uH‰#H‹\$8H‰\$H‹\$@H‰\$H‹\$0H‰$H‹\$(H‹[ ÿÓ¶\$ˆ\$HHƒÄ à + 0runtime.morestack_noctxt¬ +P@ "".~r2@type.bool""..anon10&type.*"".RouteMatch""..anon0 ,type.*net/http.Request""..thistype."".matcher@K? p +p +VTgclocals·564befda8e2e8cc7f35f6bc1d3c5e0a6Tgclocals·3280bececceccd33cb74587feedb1f9fþ."".(*MatcherFunc).Match€îeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8H‹+H‰,$H‹\$@H‰\$H‹\$HH‰\$è¶\$ˆ\$PHƒÄ0à + 0runtime.morestack_noctxt~go.string."mux"¨.go.string."MatcherFunc"Ô"go.string."Match"ü"runtime.panicwrapÊ("".MatcherFunc.Match@` "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request""..this(type.*"".MatcherFunc`œ_ +À À +}CTgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fþ4type..hash.[1]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0893type.int"".autotmp_0892type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[1]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ0type..eq.[1]interface {}àÈeH‹ 
%H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0897?"type.interface {}"".autotmp_0896"type.interface {}"".autotmp_0895_type.int"".autotmp_0894Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[1]interface {}"".p*type.*[1]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ4type..hash.[3]interface {}àÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0900type.int"".autotmp_0899type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[3]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ0type..eq.[3]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0904?"type.interface {}"".autotmp_0903"type.interface {}"".autotmp_0902_type.int"".autotmp_0901Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[3]interface {}"".p*type.*[3]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ4type..hash.[2]interface {}àÎeH‹ 
%H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtü(runtime.nilinterhash@` "".autotmp_0907type.int"".autotmp_0906type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".p*type.*[2]interface {}`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9f¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ0type..eq.[2]interface {}àÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$hHƒû„–H‰ÅHkíHëH‹ H‹sH‹\$`HƒûtvH‰ÅHkíHëH‹H‹SH9ÈuVH‰D$8H‰$H‰T$@H‰T$H‰L$HH‰L$H‰t$PH‰t$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxtÂruntime.efaceeq@°"".autotmp_0911?"type.interface {}"".autotmp_0910"type.interface {}"".autotmp_0909_type.int"".autotmp_0908Otype.int "".~r30type.bool"".s type.uintptr"".q*type.*[2]interface {}"".p*type.*[2]interface {}&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843b¦/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.goþ2"".(*headerMatcher).Match€îeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8H‹+H‰,$H‹\$@H‰\$H‹\$HH‰\$è¶\$ˆ\$PHƒÄ0à + 0runtime.morestack_noctxt~go.string."mux"¨2go.string."headerMatcher"Ô"go.string."Match"ü"runtime.panicwrapÊ,"".headerMatcher.Match@` "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request""..this,type.*"".headerMatcher`œ_ +ÀÀ +}CTgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fþ<"".(*headerRegexMatcher).Match€îeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹\$8H‹+H‰,$H‹\$@H‰\$H‹\$HH‰\$è¶\$ˆ\$PHƒÄ0à + 0runtime.morestack_noctxt~go.string."mux"¨þ2"".(*methodMatcher).Match ŽeH‹ 
%H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt4H,$H‰ïH¥H¥H¥H‹\$@H‰\$H‹\$HH‰\$ è¶\$(ˆ\$PHƒÄ0ÉëÈ + 0runtime.morestack_noctxt~go.string."mux"¨2go.string."methodMatcher"Ô"go.string."Match"ü"runtime.panicwrapâ,"".methodMatcher.Match@` "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request""..this,type.*"".methodMatcher`¨_` ÐÐ +}STgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fþ2"".(*schemeMatcher).Match ŽeH‹ %H;awèëêHƒì0H‹Y H…Ût H|$8H9;uH‰#H‹\$81íH9ëuHHH,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥HHl$ H‰ïH‰ÞH¥H¥è H‹t$8Hƒþt4H,$H‰ïH¥H¥H¥H‹\$@H‰\$H‹\$HH‰\$ è¶\$(ˆ\$PHƒÄ0ÉëÈ + 0runtime.morestack_noctxt~go.string."mux"¨2go.string."schemeMatcher"Ô"go.string."Match"ü"runtime.panicwrapâ,"".schemeMatcher.Match@` "".~r20type.bool"".match &type.*"".RouteMatch"".r,type.*net/http.Request""..this,type.*"".schemeMatcher`¨_` ÐÐ +}STgclocals·8d11a518189555fd7f3bac3cc6ad264cTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·f6dcde45bff02c6c4b088b594fd52a4c((þTgclocals·37da6a443256db8ec55c7210d030a9b0((þTgclocals·29f0050a5ee7c2b9348a75428171d7de þTgclocals·d69c4140875de858f5dc9e2e8acb0bc0 **þ,Zgo.itab.net/http.HandlerFunc.net/http.Handlerþ(go.string."Location"@2Location (go.string."Location"þTgclocals·4c1561a135d5ed5147fd4ff64ff73c94€€6°° ¢ˆ ¢ˆþTgclocals·7a383875e23784cb158d762414ce6278HH®®®®®®®þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·64b411f0f44be3f38c26e84fc3239091þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·d3486bc7ce1948dc22d7ad1c0be2887a +þ,Bgo.itab.*"".Router."".parentRouteþTgclocals·691c0cb9316c0a5f7d8580c74ac115f2@@ 
(þTgclocals·0d6246443c3fddb7ffb759a83afd407d@@þTgclocals·8cdbdba615b2fb90357456ca3f2cb9a4PP   ""þTgclocals·259efa0f9d5b5ab4cbb1f7201749d3e1PP ÊÊÊÊÊÊÊÊþTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·1c7793dad628d89b0b03aa7a6b5e8ac7HH +ŠŠŠŠŠŠŠþTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0HH + + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·ccff1a4364f53102a1b73e3274c6c0d4HH + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·6b6fbfacf80ed81d2be06478c8f1790dHH + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0HH + + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·ccff1a4364f53102a1b73e3274c6c0d4HH + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·ccff1a4364f53102a1b73e3274c6c0d4HH + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0HH + + + + + + + +þTgclocals·f24e5ae57611d01ccf1f96d64c337e04HHˆ¨( + + þTgclocals·466dbe9b6d0b019671e1c2db1c9f0ba0HH + + + + + + + +þTgclocals·38f35918b64660b95e0269a6592b7ed4PP   ""€þTgclocals·776d19cc6eced68e652f85d577f321c6PP + + + + + + + +þTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·e1370d8c0370fc841121204684c0e45dpp.‚¢Šˆ €þTgclocals·02f53cdec99f366e42fb544f32ed9035@@******þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9b807a1de79759fa48658b2ca8ff7282>þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·9b807a1de79759fa48658b2ca8ff7282>þgo.string."/"0$/ go.string."/"þTgclocals·29f0050a5ee7c2b9348a75428171d7de þTgclocals·771157e6981a4b26b64a947269cc9ecb þZgo.string."mux: duplicated route variable %q"pd!mux: duplicated 
route variable %q Zgo.string."mux: duplicated route variable %q"þTgclocals·a4b09b32f70466d9a6c07b8385c51f8aPP.* / þTgclocals·90aaa11a3c4e552027084aaae119235b00‚‚‚‚þ†go.string."mux: number of parameters must be multiple of 2, got %v"7mux: number of parameters must be multiple of 2, got %v †go.string."mux: number of parameters must be multiple of 2, got %v"þTgclocals·7876b70d8da64fa07ca2fd3ecc71f905((ð‚ð€þTgclocals·6d3fa487f5e45db9cb9199d2a5e0e216(( þTgclocals·61e2515c69061b8fed0e66ece719f936 þTgclocals·ca1ebfc68aaed1d083688775167e5178  þTgclocals·023baa463c1418986a4dad82d1430d9900  +þTgclocals·9fcabcff059425eb5732bdd45e48e99f00 þTgclocals·4398bb51467914f29637b614067b995f þTgclocals·f608478770c574ea7f894c13fa2c89c9  ‚‚þTgclocals·7212e532adb3b2b744244d8012f4d919PP2€ªV€€ªV‚€ªVþTgclocals·6302ce6642f568c714fa473870d50e5100þTgclocals·12a3c69de01d25238dd5def67adcdcd9PP0 ªU ªU + ªUþTgclocals·6302ce6642f568c714fa473870d50e5100þ,>go.itab.*bytes.Buffer.io.Writerþ"go.string."[^/]+"0,[^/]+ "go.string."[^/]+"þ$go.string."[^?&]*"0.[^?&]* $go.string."[^?&]*"þ"go.string."[^.]+"0,[^.]+ "go.string."[^.]+"þgo.string.":"0$: go.string.":"þ\go.string."mux: missing name or pattern in %q"pf"mux: missing name or pattern in %q \go.string."mux: missing name or pattern in %q"þ0go.string."%s(?P<%s>%s)"@: %s(?P<%s>%s) 0go.string."%s(?P<%s>%s)"þ"go.string."%s%%s"0,%s%%s "go.string."%s%%s"þ go.string."^%s$"0*^%s$ go.string."^%s$"þ go.string."[/]?"0*[/]? 
go.string."[/]?"þgo.string."="0$= go.string."="þ"go.string.""0, "go.string.""þTgclocals·33531776c15af84406e52705f6739a4bà +à +"š€ € € € € € € "€€ " €€€ " €€€ " €€€ˆ "(€€€ˆ Âÿ"€€€ˆ Àÿ" €€ "(€€È  "€€È  "€€ "€È  "€È  È"€À +È  È"€"€"€€€ˆþTgclocals·794bc2a224f980dced8624445883a1f1  " þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·8d11a518189555fd7f3bac3cc6ad264c*þTgo.string."mux: missing route variable %q"`^mux: missing route variable %q Tgo.string."mux: missing route variable %q"þngo.string."mux: variable %q doesn't match, expected %q"€x+mux: variable %q doesn't match, expected %q ngo.string."mux: variable %q doesn't match, expected %q"þTgclocals·016ee616535ff5ac97db8c28536faf6d€€ +RÈ È€€ ü€üþTgclocals·87979038f036a055d96e4dae0820fce3`` + + + + + + + + + + +þTgclocals·d0cd7946f7c85d974217a4cbfbe17824``( ªU" ªU ªUþTgclocals·f99f470b4e8bf0bbfec1c215fb234ac788 + + + + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·d3486bc7ce1948dc22d7ad1c0be2887a +þPgo.string."mux: unbalanced braces in %q"`Zmux: unbalanced braces in %q Pgo.string."mux: unbalanced braces in %q"þTgclocals·36d420fa591ebaf09d3a180d960e2a08((  þTgclocals·9680905063a74374258fdae79a25b518((þgo.string."v"0$v go.string."v"þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·a73fd2a0c6f832642aa9216fd9c5e6beþ,dgo.itab.*net/http.redirectHandler.net/http.HandlerþTgclocals·d36b9b8893a5ece0bc46499614098043ðð ~€€€€((€(þTgclocals·4fc7bdfae1004e17f25f57b4d9db22c7hh ªªªªªªªªªªªþTgclocals·8d600a433c6aaa81a4fe446d95c5546b þTgclocals·d7e8a62d22b1cde6d92b17a55c33fe8f þTgclocals·4398bb51467914f29637b614067b995f þTgclocals·d69c4140875de858f5dc9e2e8acb0bc0 **þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·06cab038d51064a089bda21fa03e00f7þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·433981679ca6b8ba029d40d9f4c7048c.þTgclocals·deb2efaee408e42e52d8a1e8a4a979e1(( +þTgclocals·3cd76c4f8d01c613585e17871258aa07(( + + 
+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þpgo.string."mux: route already has name %q, can't set %q"€z,mux: route already has name %q, can't set %q pgo.string."mux: route already has name %q, can't set %q"þTgclocals·609fbbd38973bf0432058b6e5d6645e900ˆððþTgclocals·4205cab2470caaf976442750814b93e400 + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·23c4785fa8abd7e258acfe91c9f325f3  þTgclocals·a69e79957b5150531998200513ab99ee ..þ,Dgo.itab.*"".routeRegexp."".matcherþjgo.string."mux: path must start with a slash, got %q"€t)mux: path must start with a slash, got %q jgo.string."mux: path must start with a slash, got %q"þTgclocals·49f894b96db65eb6fa4537f9009e3618€€ +B € ‚(  þTgclocals·a579fb2cc990573d92ac647761c8f48e`` + JJJJJJJJJJþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8d11a518189555fd7f3bac3cc6ad264c*þ,Fgo.itab."".headerMatcher."".matcherþTgclocals·d0bf09074369fc3c2a2033c0e9216dd2@@"ÂþTgclocals·61dac2719f307a892a4a15123f2e6a2d@@ + + + + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8d11a518189555fd7f3bac3cc6ad264c*þ,Pgo.itab."".headerRegexMatcher."".matcherþTgclocals·d0bf09074369fc3c2a2033c0e9216dd2@@"ÂþTgclocals·61dac2719f307a892a4a15123f2e6a2d@@ + + + + + + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8d11a518189555fd7f3bac3cc6ad264c*þ,Bgo.itab."".MatcherFunc."".matcherþTgclocals·75f60c0a81a1e5f06874fff822af42bb88 +² þTgclocals·ab01a2d55089ff50c402006df1039c3988 + + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·7f9f3eadddff48005164209ea06f81db ‚þ,Fgo.itab."".methodMatcher."".matcherþTgclocals·ea547fb5b4f79d347b1811ec26dee271pp,  þTgclocals·61dac2719f307a892a4a15123f2e6a2d@@ + + + + + + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 
+þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·e0dd5664695c71438932a711825a98a4 +þTgclocals·c3929f74b6da126d84d869df0f05d5b9((/þTgclocals·149f5bf45741ad4d84849674a456615e(( + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·7f9f3eadddff48005164209ea06f81db ‚þ,Fgo.itab."".schemeMatcher."".matcherþTgclocals·ea547fb5b4f79d347b1811ec26dee271pp,  þTgclocals·61dac2719f307a892a4a15123f2e6a2d@@ + + + + + + +þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·d3486bc7ce1948dc22d7ad1c0be2887a +þ,@go.itab.*"".Route."".parentRouteþ,:go.itab.*"".Router."".matcherþTgclocals·b285e033b8bcfe9b9c1b7a6b8ade4839PP €  * + + +þTgclocals·d2a701546bccde17a508ee8f261130f1PPþ,Bgo.itab.*errors.errorString.errorþdgo.string."mux: route doesn't have a host or path"pn&mux: route doesn't have a host or path dgo.string."mux: route doesn't have a host or path"þ go.string."http"0*http go.string."http"þTgclocals·15a9e9c4f9817efd4475d45f753600deXX  €€€€ˆˆþTgclocals·918b03c3cf4f7263dd73363217e9a538XX  + + + + + + + + +þTgo.string."mux: route doesn't have a host"`^mux: route doesn't have a host Tgo.string."mux: route doesn't have a host"þTgclocals·92e9b440ac71050dc24736a398ce1eac88 þTgclocals·59dbf976b94cece68fb6f0f44435318f88 + + + + +þTgo.string."mux: route doesn't have a path"`^mux: route doesn't have a path Tgo.string."mux: route doesn't have a path"þTgclocals·92e9b440ac71050dc24736a398ce1eac88 þTgclocals·59dbf976b94cece68fb6f0f44435318f88 + + + + +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·9877a4ef732a0f966b889793f9b99b87 +þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·d3486bc7ce1948dc22d7ad1c0be2887a +þTgclocals·db0987207386230beda65332b07cbe03((þTgclocals·31b90725c9a885e731df361f51db8f0d((þTgclocals·3ed9fdb07b75789a32dd7844090b13c9@@ €" þTgclocals·0d6246443c3fddb7ffb759a83afd407d@@þ8go.string."skip this router"PBskip this router 8go.string."skip this router"þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·3280bececceccd33cb74587feedb1f9fþ*"".SkipRouter 
type.errorþ,"".initdone·type.uint8þ"".NewRouter·f"".NewRouterþ$runtime.makemap·fruntime.makemapþ(runtime.newobject·f"runtime.newobjectþ4runtime.writebarrierptr·f.runtime.writebarrierptrþ,runtime.throwreturn·f&runtime.throwreturnþ*"".(*Router).Match·f$"".(*Router).Matchþ("".(*Route).Match·f""".(*Route).Matchþ2"".(*Router).ServeHTTP·f,"".(*Router).ServeHTTPþ"".cleanPath·f"".cleanPathþ&runtime.eqstring·f runtime.eqstringþ0net/url.(*URL).String·f*net/url.(*URL).Stringþ,net/http.Header.Set·f&net/http.Header.Setþ,runtime.deferreturn·f&runtime.deferreturnþ"".setVars·f"".setVarsþ*"".setCurrentRoute·f$"".setCurrentRouteþ(net/http.NotFound·f"net/http.NotFoundþ&runtime.typ2Itab·f runtime.typ2Itabþœgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Clear·f–github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Clearþ(runtime.deferproc·f"runtime.deferprocþ&"".(*Router).Get·f "".(*Router).Getþ<"".(*Router).getNamedRoutes·f6"".(*Router).getNamedRoutesþ:runtime.mapaccess1_faststr·f4runtime.mapaccess1_faststrþ0"".(*Router).GetRoute·f*"".(*Router).GetRouteþ6"".(*Router).StrictSlash·f0"".(*Router).StrictSlashþ<"".(*Router).getRegexpGroup·f6"".(*Router).getRegexpGroupþ2"".(*Router).buildVars·f,"".(*Router).buildVarsþ0"".(*Router).NewRoute·f*"".(*Router).NewRouteþ8runtime.writebarrieriface·f2runtime.writebarrierifaceþ(runtime.growslice·f"runtime.growsliceþ8runtime.writebarrierslice·f2runtime.writebarriersliceþ,"".(*Router).Handle·f&"".(*Router).Handleþ&"".(*Route).Path·f "".(*Route).Pathþ4"".(*Router).HandleFunc·f."".(*Router).HandleFuncþ4"".(*Route).HandlerFunc·f."".(*Route).HandlerFuncþ."".(*Router).Headers·f("".(*Router).Headersþ,"".(*Route).Headers·f&"".(*Route).Headersþ("".(*Router).Host·f""".(*Router).Hostþ&"".(*Route).Host·f 
"".(*Route).Hostþ6"".(*Router).MatcherFunc·f0"".(*Router).MatcherFuncþ4"".(*Route).MatcherFunc·f."".(*Route).MatcherFuncþ."".(*Router).Methods·f("".(*Router).Methodsþ,"".(*Route).Methods·f&"".(*Route).Methodsþ("".(*Router).Path·f""".(*Router).Pathþ4"".(*Router).PathPrefix·f."".(*Router).PathPrefixþ2"".(*Route).PathPrefix·f,"".(*Route).PathPrefixþ."".(*Router).Queries·f("".(*Router).Queriesþ,"".(*Route).Queries·f&"".(*Route).Queriesþ."".(*Router).Schemes·f("".(*Router).Schemesþ,"".(*Route).Schemes·f&"".(*Route).Schemesþ:"".(*Router).BuildVarsFunc·f4"".(*Router).BuildVarsFuncþ("".(*Router).Walk·f""".(*Router).Walkþ("".(*Router).walk·f""".(*Router).walkþ$runtime.ifaceeq·fruntime.ifaceeqþ*runtime.assertI2T2·f$runtime.assertI2T2þ*runtime.panicslice·f$runtime.panicsliceþ"".Vars·f"".Varsþ$runtime.convT2E·fruntime.convT2Eþ˜github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Get·f’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Getþ(runtime.assertE2T·f"runtime.assertE2Tþ$"".CurrentRoute·f"".CurrentRouteþ˜github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Set·f’github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.Setþ*runtime.panicindex·f$runtime.panicindexþ0runtime.concatstring2·f*runtime.concatstring2þpath.Clean·fpath.Cleanþ "".uniqueVars·f"".uniqueVarsþfmt.Errorf·ffmt.Errorfþ 
"".checkPairs·f"".checkPairsþ4"".mapFromPairsToString·f."".mapFromPairsToStringþ*runtime.mapassign1·f$runtime.mapassign1þ2"".mapFromPairsToRegex·f,"".mapFromPairsToRegexþ"regexp.Compile·fregexp.Compileþ$"".matchInArray·f"".matchInArrayþ0"".matchMapWithString·f*"".matchMapWithStringþ,runtime.mapiterinit·f&runtime.mapiterinitþ,runtime.mapiternext·f&runtime.mapiternextþregexp.(*Regexp).MatchString·f8regexp.(*Regexp).MatchStringþ("".newRouteRegexp·f""".newRouteRegexpþ$"".braceIndices·f"".braceIndicesþ(runtime.makeslice·f"runtime.makesliceþ8runtime.stringtoslicebyte·f2runtime.stringtoslicebyteþ8bytes.(*Buffer).WriteByte·f2bytes.(*Buffer).WriteByteþ"strings.SplitN·fstrings.SplitNþ®exp.QuoteMeta·f regexp.QuoteMetaþ$"".varGroupName·f"".varGroupNameþfmt.Fprintf·ffmt.Fprintfþ:runtime.writebarrierstring·f4runtime.writebarrierstringþfmt.Sprintf·ffmt.Sprintfþ"".(*routeRegexpGroup).setMatchþLregexp.(*Regexp).FindStringSubmatch·fFregexp.(*Regexp).FindStringSubmatchþ net/url.Parse·fnet/url.Parseþ strings.Index·fstrings.Indexþ."".(*Route).GetError·f("".(*Route).GetErrorþ0"".(*Route).BuildOnly·f*"".(*Route).BuildOnlyþ,"".(*Route).Handler·f&"".(*Route).Handlerþ2"".(*Route).GetHandler·f,"".(*Route).GetHandlerþ&"".(*Route).Name·f 
"".(*Route).Nameþ:"".(*Route).getNamedRoutes·f4"".(*Route).getNamedRoutesþ,"".(*Route).GetName·f&"".(*Route).GetNameþ2"".(*Route).addMatcher·f,"".(*Route).addMatcherþ>"".(*Route).addRegexpMatcher·f8"".(*Route).addRegexpMatcherþ:"".(*Route).getRegexpGroup·f4"".(*Route).getRegexpGroupþ(strings.TrimRight·f"strings.TrimRightþ2"".headerMatcher.Match·f,"".headerMatcher.Matchþ<"".headerRegexMatcher.Match·f6"".headerRegexMatcher.Matchþ8"".(*Route).HeadersRegexp·f2"".(*Route).HeadersRegexpþ."".MatcherFunc.Match·f("".MatcherFunc.Matchþ2"".methodMatcher.Match·f,"".methodMatcher.Matchþ$strings.ToUpper·fstrings.ToUpperþ$runtime.convT2I·fruntime.convT2Iþ2"".schemeMatcher.Match·f,"".schemeMatcher.Matchþ$strings.ToLower·fstrings.ToLowerþ8"".(*Route).BuildVarsFunc·f2"".(*Route).BuildVarsFuncþ0"".(*Route).Subrouter·f*"".(*Route).Subrouterþ$"".(*Route).URL·f"".(*Route).URLþ4"".(*Route).prepareVars·f."".(*Route).prepareVarsþ,"".(*Route).URLHost·f&"".(*Route).URLHostþ,"".(*Route).URLPath·f&"".(*Route).URLPathþ0"".(*Route).buildVars·f*"".(*Route).buildVarsþ"".init·f"".initþ(runtime.throwinit·f"runtime.throwinitþstrings.init·fstrings.initþstrconv.init·fstrconv.initþnet/url.init·fnet/url.initþbytes.init·fbytes.initþšgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/context.init·f”github.com/fsouza/go-dockerclient/external/github.com/gorilla/context.initþregexp.init·fregexp.initþpath.init·fpath.initþ net/http.init·fnet/http.initþfmt.init·ffmt.initþerrors.New·ferrors.NewþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·eeb28990c0dc813022336c3780186218+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8cb639c12a4a13c6ace27031b0f83707 þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·8cb639c12a4a13c6ace27031b0f83707 þbruntime.gcbits.0x48844400000000000000000000000000 H„Dþ(go.string."[]string"@2[]string (go.string."[]string"þtype.[]string  Ó¨ó +   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P(go.string."[]string"p,go.weak.type.*[]string€"runtime.zerovaluetype.stringþ:go.typelink.[]string/[]stringtype.[]stringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[8]string(type..hash.[8]stringþ,type..eqfunc.[8]string$type..eq.[8]stringþ&type..alg.[8]string 0type..hashfunc.[8]string,type..eqfunc.[8]stringþbruntime.gcbits.0x48484848484848480000000000000000 HHHHHHHHþ*go.string."[8]string"@4 [8]string *go.string."[8]string"þtype.[8]stringÀÀ€USŒ> &type..alg.[8]string0bruntime.gcbits.0x48484848484848480000000000000000P*go.string."[8]string"p.go.weak.type.*[8]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[8]string/[8]stringtype.[8]stringþbruntime.gcbits.0x88000000000000000000000000000000 ˆþJgo.string."*map.bucket[string]string"`T*map.bucket[string]string Jgo.string."*map.bucket[string]string"þYˆ à runtime.algarray0Btype..gc.map.bucket[string]string@Jtype..gcprog.map.bucket[string]stringPHgo.string."map.bucket[string]string"pLgo.weak.type.*map.bucket[string]string€"runtime.zerovalueÀ:type.map.bucket[string]stringÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]stringà(go.string."overflow"€go.weak.type.*map[string]string€"runtime.zerovaluetype.string type.string°:type.map.bucket[string]stringÀ4type.map.hdr[string]stringþ^go.typelink.map[string]string/map[string]string,type.map[string]stringþjgo.string."func(map[string]string) map[string]string"€t)func(map[string]string) map[string]string jgo.string."func(map[string]string) map[string]string"þ\type.func(map[string]string) map[string]string  B½ü$3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(map[string]string) map[string]string"pngo.weak.type.*func(map[string]string) map[string]string€"runtime.zerovalue €\type.func(map[string]string) 
map[string]stringÐ\type.func(map[string]string) map[string]string€,type.map[string]string,type.map[string]stringþPgo.string."func() map[string]*mux.Route"`Zfunc() map[string]*mux.Route Pgo.string."func() map[string]*mux.Route"þ@type.func() map[string]*"".Routeûù 93 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func() map[string]*mux.Route"pRgo.weak.type.*func() map[string]*"".Route€"runtime.zerovalue €@type.func() map[string]*"".RouteЀ@type.func() map[string]*"".Route€2type.map[string]*"".Routeþ8go.string."[]*regexp.Regexp"PB[]*regexp.Regexp 8go.string."[]*regexp.Regexp"þ*type.[]*regexp.Regexp  ›Èpà   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P8go.string."[]*regexp.Regexp"pmux.RouteMatch 4go.string."mux.RouteMatch"þ"go.string."Route"0,Route "go.string."Route"þ&go.string."Handler"00Handler &go.string."Handler"þ go.string."Vars"0*Vars go.string."Vars"þ,go.string."RouteMatch"@6 +RouteMatch ,go.string."RouteMatch"þ$type."".RouteMatch€€  ÷Ž@ à runtime.algarray0bruntime.gcbits.0xc8880000000000000000000000000000P4go.string."mux.RouteMatch"p&type.*"".RouteMatch€"runtime.zerovalueÀ$type."".RouteMatchÀ"go.string."Route"àtype.*"".Route&go.string."Handler"°*type.net/http.Handlerà go.string."Vars"€,type.map[string]string`°$type."".RouteMatch°,go.string."RouteMatch"À"go.importpath."".Ѐ$type."".RouteMatchþ6go.string."*mux.RouteMatch"@@*mux.RouteMatch 6go.string."*mux.RouteMatch"þ&type.*"".RouteMatch  ¦A¯}6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*mux.RouteMatch"p8go.weak.type.**"".RouteMatch€"runtime.zerovalue$type."".RouteMatchþŽgo.string."func(*mux.routeRegexp, *http.Request, *mux.RouteMatch) bool" ˜;func(*mux.routeRegexp, *http.Request, *mux.RouteMatch) bool Žgo.string."func(*mux.routeRegexp, *http.Request, *mux.RouteMatch) bool"þ„type.func(*"".routeRegexp, *net/http.Request, *"".RouteMatch) boolÀÀÅ`3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŽgo.string."func(*mux.routeRegexp, *http.Request, *mux.RouteMatch) bool"p–go.weak.type.*func(*"".routeRegexp, *net/http.Request, *"".RouteMatch) bool€"runtime.zerovalue €„type.func(*"".routeRegexp, *net/http.Request, *"".RouteMatch) boolа„type.func(*"".routeRegexp, *net/http.Request, *"".RouteMatch) bool€(type.*"".routeRegexp,type.*net/http.Request &type.*"".RouteMatch°type.boolþpgo.string."func(*mux.routeRegexp, *http.Request) string"€z,func(*mux.routeRegexp, *http.Request) string pgo.string."func(*mux.routeRegexp, *http.Request) string"þhtype.func(*"".routeRegexp, *net/http.Request) string°°¹u©3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."func(*mux.routeRegexp, *http.Request) string"pzgo.weak.type.*func(*"".routeRegexp, *net/http.Request) string€"runtime.zerovalue €htype.func(*"".routeRegexp, *net/http.Request) stringРhtype.func(*"".routeRegexp, *net/http.Request) string€(type.*"".routeRegexp,type.*net/http.Request type.stringþlgo.string."func(*mux.routeRegexp, *http.Request) bool"€v*func(*mux.routeRegexp, *http.Request) bool lgo.string."func(*mux.routeRegexp, *http.Request) bool"þdtype.func(*"".routeRegexp, *net/http.Request) bool°°´ÖÐ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(*mux.routeRegexp, *http.Request) bool"pvgo.weak.type.*func(*"".routeRegexp, *net/http.Request) bool€"runtime.zerovalue €dtype.func(*"".routeRegexp, *net/http.Request) boolРdtype.func(*"".routeRegexp, *net/http.Request) bool€(type.*"".routeRegexp,type.*net/http.Request type.boolþŠgo.string."func(*mux.routeRegexp, map[string]string) (string, error)" ”9func(*mux.routeRegexp, map[string]string) (string, error) Šgo.string."func(*mux.routeRegexp, map[string]string) (string, error)"þztype.func(*"".routeRegexp, map[string]string) (string, error)ÀÀa]Ð{3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŠgo.string."func(*mux.routeRegexp, map[string]string) (string, error)"pŒgo.weak.type.*func(*"".routeRegexp, map[string]string) (string, error)€"runtime.zerovalue €ztype.func(*"".routeRegexp, map[string]string) (string, error)Рztype.func(*"".routeRegexp, map[string]string) (string, error)€(type.*"".routeRegexp,type.map[string]string type.string°type.errorþ"go.string."Match"0,Match "go.string."Match"þjgo.string."func(*http.Request, *mux.RouteMatch) bool"€t)func(*http.Request, *mux.RouteMatch) bool jgo.string."func(*http.Request, *mux.RouteMatch) bool"þbtype.func(*net/http.Request, *"".RouteMatch) bool°°=Õ±3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(*http.Request, *mux.RouteMatch) bool"ptgo.weak.type.*func(*net/http.Request, *"".RouteMatch) bool€"runtime.zerovalue €btype.func(*net/http.Request, *"".RouteMatch) boolРbtype.func(*net/http.Request, *"".RouteMatch) bool€,type.*net/http.Request&type.*"".RouteMatch type.boolþ.go.string."getUrlQuery"@8 getUrlQuery .go.string."getUrlQuery"þLgo.string."func(*http.Request) string"`Vfunc(*http.Request) string Lgo.string."func(*http.Request) string"þFtype.func(*net/http.Request) string  ôv t3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(*http.Request) string"pXgo.weak.type.*func(*net/http.Request) string€"runtime.zerovalue €Ftype.func(*net/http.Request) stringÐFtype.func(*net/http.Request) string€,type.*net/http.Requesttype.stringþ8go.string."matchQueryString"PBmatchQueryString 8go.string."matchQueryString"þHgo.string."func(*http.Request) bool"`Rfunc(*http.Request) bool Hgo.string."func(*http.Request) bool"þBtype.func(*net/http.Request) bool  eÿÀ¬3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func(*http.Request) bool"pTgo.weak.type.*func(*net/http.Request) bool€"runtime.zerovalue €Btype.func(*net/http.Request) 
boolÐBtype.func(*net/http.Request) bool€,type.*net/http.Requesttype.boolþgo.string."url"0(url go.string."url"þfgo.string."func(map[string]string) (string, error)"pp'func(map[string]string) (string, error) fgo.string."func(map[string]string) (string, error)"þXtype.func(map[string]string) (string, error)°°^aG3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(map[string]string) (string, error)"pjgo.weak.type.*func(map[string]string) (string, error)€"runtime.zerovalue €Xtype.func(map[string]string) (string, error)ÐXtype.func(map[string]string) (string, error)€,type.map[string]stringtype.string type.errorþ(type.*"".routeRegexpððT~”è6>   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*mux.routeRegexp"p:go.weak.type.**"".routeRegexp€"runtime.zerovalue&type."".routeRegexp` (type.*"".routeRegexpÀð(type.*"".routeRegexpð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool „type.func(*"".routeRegexp, *net/http.Request, *"".RouteMatch) bool°."".(*routeRegexp).MatchÀ."".(*routeRegexp).MatchÐ.go.string."getUrlQuery"à"go.importpath."".ðFtype.func(*net/http.Request) string€htype.func(*"".routeRegexp, *net/http.Request) string:"".(*routeRegexp).getUrlQuery :"".(*routeRegexp).getUrlQuery°8go.string."matchQueryString"À"go.importpath."".ÐBtype.func(*net/http.Request) boolàdtype.func(*"".routeRegexp, *net/http.Request) boolðD"".(*routeRegexp).matchQueryString€D"".(*routeRegexp).matchQueryStringgo.string."url" "go.importpath."".°Xtype.func(map[string]string) (string, error)Àztype.func(*"".routeRegexp, map[string]string) (string, error)Ð*"".(*routeRegexp).urlà*"".(*routeRegexp).urlþgo.weak.type.*[]*"".routeRegexp€"runtime.zerovalue(type.*"".routeRegexpþ`go.typelink.[]*mux.routeRegexp/[]*"".routeRegexp,type.[]*"".routeRegexpþbruntime.gcbits.0x88488488440000000000000000000000 ˆH„ˆDþ@go.string."mux.routeRegexpGroup"PJmux.routeRegexpGroup @go.string."mux.routeRegexpGroup"þ go.string."host"0*host 
go.string."host"þ go.string."path"0*path go.string."path"þ&go.string."queries"00queries &go.string."queries"þ8go.string."routeRegexpGroup"PBrouteRegexpGroup 8go.string."routeRegexpGroup"þ0type."".routeRegexpGroup€€(“1JF& à runtime.algarray0bruntime.gcbits.0x88488488440000000000000000000000P@go.string."mux.routeRegexpGroup"p2type.*"".routeRegexpGroup€"runtime.zerovalueÀ0type."".routeRegexpGroupÀ go.string."host"Ð"go.importpath."".à(type.*"".routeRegexp go.string."path" "go.importpath."".°(type.*"".routeRegexpà&go.string."queries"ð"go.importpath."".€,type.[]*"".routeRegexp`°0type."".routeRegexpGroup°8go.string."routeRegexpGroup"À"go.importpath."".Ѐ0type."".routeRegexpGroupþBgo.string."*mux.routeRegexpGroup"PL*mux.routeRegexpGroup Bgo.string."*mux.routeRegexpGroup"þ¦go.string."func(*mux.routeRegexpGroup, *http.Request, *mux.RouteMatch, *mux.Route)"°°Gfunc(*mux.routeRegexpGroup, *http.Request, *mux.RouteMatch, *mux.Route) ¦go.string."func(*mux.routeRegexpGroup, *http.Request, *mux.RouteMatch, *mux.Route)"þštype.func(*"".routeRegexpGroup, *net/http.Request, *"".RouteMatch, *"".Route)ÀÀ'8D3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P¦go.string."func(*mux.routeRegexpGroup, *http.Request, *mux.RouteMatch, *mux.Route)"p¬go.weak.type.*func(*"".routeRegexpGroup, *net/http.Request, *"".RouteMatch, *"".Route)€"runtime.zerovalue €štype.func(*"".routeRegexpGroup, *net/http.Request, *"".RouteMatch, *"".Route)ÐÀštype.func(*"".routeRegexpGroup, *net/http.Request, *"".RouteMatch, *"".Route)€2type.*"".routeRegexpGroup,type.*net/http.Request &type.*"".RouteMatch°type.*"".Routeþ(go.string."setMatch"@2setMatch (go.string."setMatch"þxgo.string."func(*http.Request, *mux.RouteMatch, *mux.Route)"‚0func(*http.Request, *mux.RouteMatch, *mux.Route) xgo.string."func(*http.Request, *mux.RouteMatch, *mux.Route)"þntype.func(*net/http.Request, *"".RouteMatch, *"".Route)°°C/Oµ3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*http.Request, *mux.RouteMatch, *mux.Route)"p€go.weak.type.*func(*net/http.Request, *"".RouteMatch, *"".Route)€"runtime.zerovalue €ntype.func(*net/http.Request, *"".RouteMatch, *"".Route)аntype.func(*net/http.Request, *"".RouteMatch, *"".Route)€,type.*net/http.Request&type.*"".RouteMatch type.*"".Routeþ2type.*"".routeRegexpGroupÐÐϹ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*mux.routeRegexpGroup"pDgo.weak.type.**"".routeRegexpGroup€"runtime.zerovalue0type."".routeRegexpGroup` 2type.*"".routeRegexpGroupÀð2type.*"".routeRegexpGroupð(go.string."setMatch"€"go.importpath."".ntype.func(*net/http.Request, *"".RouteMatch, *"".Route) štype.func(*"".routeRegexpGroup, *net/http.Request, *"".RouteMatch, *"".Route)°>"".(*routeRegexpGroup).setMatchÀ>"".(*routeRegexpGroup).setMatchþPgo.string."func() *mux.routeRegexpGroup"`Zfunc() *mux.routeRegexpGroup Pgo.string."func() *mux.routeRegexpGroup"þ@type.func() *"".routeRegexpGroupúå3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func() *mux.routeRegexpGroup"pRgo.weak.type.*func() *"".routeRegexpGroup€"runtime.zerovalue €@type.func() *"".routeRegexpGroupЀ@type.func() *"".routeRegexpGroup€2type.*"".routeRegexpGroupþ8go.string."*mux.parentRoute"PB*mux.parentRoute 8go.string."*mux.parentRoute"þ(type.*"".parentRoute  Ǧ´Ê6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."*mux.parentRoute"p:go.weak.type.**"".parentRoute€"runtime.zerovalue&type."".parentRouteþbruntime.gcbits.0x8c000000000000000000000000000000 Œþ6go.string."mux.parentRoute"@@mux.parentRoute 6go.string."mux.parentRoute"þ*go.string."buildVars"@4 buildVars *go.string."buildVars"þ4go.string."getNamedRoutes"@>getNamedRoutes 4go.string."getNamedRoutes"þ4go.string."getRegexpGroup"@>getRegexpGroup 4go.string."getRegexpGroup"þ.go.string."parentRoute"@8 parentRoute 
.go.string."parentRoute"þ&type."".parentRoute  ý¬Ü& à runtime.algarray0bruntime.gcbits.0x8c000000000000000000000000000000P6go.string."mux.parentRoute"p(type.*"".parentRoute€"runtime.zerovalueÀ&type."".parentRouteÀ*go.string."buildVars"Ð"go.importpath."".à\type.func(map[string]string) map[string]stringð4go.string."getNamedRoutes"€"go.importpath."".@type.func() map[string]*"".Route 4go.string."getRegexpGroup"°"go.importpath."".À@type.func() *"".routeRegexpGroup`Ð&type."".parentRouteÐ.go.string."parentRoute"à"go.importpath."".ð &type."".parentRouteþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·564befda8e2e8cc7f35f6bc1d3c5e0a6 +«þ0go.string."*mux.matcher"@: *mux.matcher 0go.string."*mux.matcher"þ type.*"".matcher   +r°É6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."*mux.matcher"p2go.weak.type.**"".matcher€"runtime.zerovaluetype."".matcherþ.go.string."mux.matcher"@8 mux.matcher .go.string."mux.matcher"þ&go.string."matcher"00matcher &go.string."matcher"þtype."".matcherÀÀÓFq à runtime.algarray0bruntime.gcbits.0x8c000000000000000000000000000000P.go.string."mux.matcher"p type.*"".matcher€"runtime.zerovalueÀtype."".matcherÀ"go.string."Match"àbtype.func(*net/http.Request, *"".RouteMatch) bool`ðtype."".matcherð&go.string."matcher"€"go.importpath."".Àtype."".matcherþ2go.string."[]mux.matcher"@< []mux.matcher 2go.string."[]mux.matcher"þ"type.[]"".matcher  *Pƒ    runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P2go.string."[]mux.matcher"p4go.weak.type.*[]"".matcher€"runtime.zerovaluetype."".matcherþLgo.typelink.[]mux.matcher/[]"".matcher"type.[]"".matcherþgo.weak.type.**"".BuildVarsFunc€"runtime.zerovalue*type."".BuildVarsFuncþ:go.string."mux.BuildVarsFunc"PDmux.BuildVarsFunc :go.string."mux.BuildVarsFunc"þ2go.string."BuildVarsFunc"@< BuildVarsFunc 2go.string."BuildVarsFunc"þ*type."".BuildVarsFuncððÙˬ¯3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."mux.BuildVarsFunc"p,type.*"".BuildVarsFunc€"runtime.zerovalue €*type."".BuildVarsFuncÐ*type."".BuildVarsFunc€,type.map[string]string,type.map[string]string` *type."".BuildVarsFunc 2go.string."BuildVarsFunc"°"go.importpath."".Àð*type."".BuildVarsFuncþbruntime.gcbits.0x8c8c488484c488000000000000000000 ŒŒH„„Ĉþ*go.string."mux.Route"@4 mux.Route *go.string."mux.Route"þ$go.string."parent"0.parent $go.string."parent"þ&go.string."handler"00handler &go.string."handler"þ(go.string."matchers"@2matchers (go.string."matchers"þ*go.string."buildOnly"@4 buildOnly *go.string."buildOnly"þ go.string."name"0*name go.string."name"þgo.string."err"0(err go.string."err"þ2go.string."buildVarsFunc"@< buildVarsFunc 2go.string."buildVarsFunc"þtype."".Routeààp}0À  8@AHXhJ à runtime.algarray0bruntime.gcbits.0x8c8c488484c488000000000000000000P*go.string."mux.Route"ptype.*"".Route€"runtime.zerovalueÀtype."".RouteÀ$go.string."parent"Ð"go.importpath."".à&type."".parentRoute&go.string."handler" "go.importpath."".°*type.net/http.Handlerà(go.string."matchers"ð"go.importpath."".€"type.[]"".matcher°$go.string."regexp"À"go.importpath."".Ð2type.*"".routeRegexpGroup€.go.string."strictSlash""go.importpath."". 
type.boolÐ*go.string."buildOnly"à"go.importpath."".ðtype.bool  go.string."name"°"go.importpath."".Àtype.stringðgo.string."err"€"go.importpath."".type.errorÀ2go.string."buildVarsFunc"Ð"go.importpath."".à*type."".BuildVarsFunc`type."".Route"go.string."Route" "go.importpath."".°àtype."".Routeþ,go.string."*mux.Route"@6 +*mux.Route ,go.string."*mux.Route"þNgo.string."func(*mux.Route) *mux.Route"`Xfunc(*mux.Route) *mux.Route Ngo.string."func(*mux.Route) *mux.Route"þtype.func(*"".Router) *"".Route  ½±«ª3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(*mux.Router) *mux.Route"pPgo.weak.type.*func(*"".Router) *"".Route€"runtime.zerovalue €>type.func(*"".Router) *"".RouteÐ>type.func(*"".Router) *"".Route€type.*"".Routertype.*"".Routeþ‚go.string."func(*mux.Router, http.ResponseWriter, *http.Request)"Œ5func(*mux.Router, http.ResponseWriter, *http.Request) ‚go.string."func(*mux.Router, http.ResponseWriter, *http.Request)"þ‚type.func(*"".Router, net/http.ResponseWriter, *net/http.Request)°°|Îtž3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P‚go.string."func(*mux.Router, http.ResponseWriter, *http.Request)"p”go.weak.type.*func(*"".Router, net/http.ResponseWriter, *net/http.Request)€"runtime.zerovalue €‚type.func(*"".Router, net/http.ResponseWriter, *net/http.Request)а‚type.func(*"".Router, net/http.ResponseWriter, *net/http.Request)€type.*"".Router8type.net/http.ResponseWriter ,type.*net/http.Requestþ^go.string."func(*mux.Router, bool) *mux.Router"ph#func(*mux.Router, bool) *mux.Router ^go.string."func(*mux.Router, bool) *mux.Router"þLtype.func(*"".Router, bool) *"".Router°°?µÐ§3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*mux.Router, bool) *mux.Router"p^go.weak.type.*func(*"".Router, bool) *"".Router€"runtime.zerovalue €Ltype.func(*"".Router, bool) *"".RouterРLtype.func(*"".Router, bool) *"".Router€type.*"".Routertype.bool type.*"".Routerþ2go.string."*mux.WalkFunc"@< 
*mux.WalkFunc 2go.string."*mux.WalkFunc"þ"type.*"".WalkFunc  !o6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."*mux.WalkFunc"p4go.weak.type.**"".WalkFunc€"runtime.zerovalue type."".WalkFuncþ0go.string."mux.WalkFunc"@: mux.WalkFunc 0go.string."mux.WalkFunc"þ(go.string."WalkFunc"@2WalkFunc (go.string."WalkFunc"þ type."".WalkFunc[ÚEØ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."mux.WalkFunc"p"type.*"".WalkFunc€"runtime.zerovalue € type."".WalkFuncа type."".WalkFunc€type.*"".Routetype.*"".Router  type.[]*"".Route°type.error`À type."".WalkFuncÀ(go.string."WalkFunc"Ð"go.importpath."".à type."".WalkFuncþbgo.string."func(*mux.Router, mux.WalkFunc) error"pl%func(*mux.Router, mux.WalkFunc) error bgo.string."func(*mux.Router, mux.WalkFunc) error"þPtype.func(*"".Router, "".WalkFunc) error°°Ó‚ëƒ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."func(*mux.Router, mux.WalkFunc) error"pbgo.weak.type.*func(*"".Router, "".WalkFunc) error€"runtime.zerovalue €Ptype.func(*"".Router, "".WalkFunc) errorРPtype.func(*"".Router, "".WalkFunc) error€type.*"".Router type."".WalkFunc type.errorþ„go.string."func(*mux.Router, map[string]string) map[string]string"Ž6func(*mux.Router, map[string]string) map[string]string „go.string."func(*mux.Router, map[string]string) map[string]string"þttype.func(*"".Router, map[string]string) map[string]string°°*ˆæV3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P„go.string."func(*mux.Router, map[string]string) map[string]string"p†go.weak.type.*func(*"".Router, map[string]string) map[string]string€"runtime.zerovalue €ttype.func(*"".Router, map[string]string) map[string]stringРttype.func(*"".Router, map[string]string) map[string]string€type.*"".Router,type.map[string]string ,type.map[string]stringþfgo.string."func(*mux.Router) map[string]*mux.Route"pp'func(*mux.Router) map[string]*mux.Route fgo.string."func(*mux.Router) 
map[string]*mux.Route"þTtype.func(*"".Router) map[string]*"".Route  ¥ü²%3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(*mux.Router) map[string]*mux.Route"pfgo.weak.type.*func(*"".Router) map[string]*"".Route€"runtime.zerovalue €Ttype.func(*"".Router) map[string]*"".RouteÐTtype.func(*"".Router) map[string]*"".Route€type.*"".Router2type.map[string]*"".Routeþfgo.string."func(*mux.Router) *mux.routeRegexpGroup"pp'func(*mux.Router) *mux.routeRegexpGroup fgo.string."func(*mux.Router) *mux.routeRegexpGroup"þTtype.func(*"".Router) *"".routeRegexpGroup  ñ^]3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(*mux.Router) *mux.routeRegexpGroup"pfgo.weak.type.*func(*"".Router) *"".routeRegexpGroup€"runtime.zerovalue €Ttype.func(*"".Router) *"".routeRegexpGroupÐTtype.func(*"".Router) *"".routeRegexpGroup€type.*"".Router2type.*"".routeRegexpGroupþ~go.string."func(*mux.Router, mux.WalkFunc, []*mux.Route) error"ˆ3func(*mux.Router, mux.WalkFunc, []*mux.Route) error ~go.string."func(*mux.Router, mux.WalkFunc, []*mux.Route) error"þjtype.func(*"".Router, "".WalkFunc, []*"".Route) errorÀÀ›»ms3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P~go.string."func(*mux.Router, mux.WalkFunc, []*mux.Route) error"p|go.weak.type.*func(*"".Router, "".WalkFunc, []*"".Route) error€"runtime.zerovalue €jtype.func(*"".Router, "".WalkFunc, []*"".Route) errorаjtype.func(*"".Router, "".WalkFunc, []*"".Route) error€type.*"".Router type."".WalkFunc  type.[]*"".Route°type.errorþ\go.string."func(mux.BuildVarsFunc) *mux.Route"pf"func(mux.BuildVarsFunc) *mux.Route \go.string."func(mux.BuildVarsFunc) *mux.Route"þJtype.func("".BuildVarsFunc) *"".Route  ã^cÄ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(mux.BuildVarsFunc) *mux.Route"p\go.weak.type.*func("".BuildVarsFunc) *"".Route€"runtime.zerovalue €Jtype.func("".BuildVarsFunc) *"".RouteÐJtype.func("".BuildVarsFunc) 
*"".Route€*type."".BuildVarsFunctype.*"".Routeþgo.string."Get"0(Get go.string."Get"þFgo.string."func(string) *mux.Route"PPfunc(string) *mux.Route Fgo.string."func(string) *mux.Route"þ6type.func(string) *"".Route  ¸Q¬ð3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."func(string) *mux.Route"pHgo.weak.type.*func(string) *"".Route€"runtime.zerovalue €6type.func(string) *"".RouteÐ6type.func(string) *"".Route€type.stringtype.*"".Routeþ(go.string."GetRoute"@2GetRoute (go.string."GetRoute"þ$go.string."Handle"0.Handle $go.string."Handle"þbgo.string."func(string, http.Handler) *mux.Route"pl%func(string, http.Handler) *mux.Route bgo.string."func(string, http.Handler) *mux.Route"þZtype.func(string, net/http.Handler) *"".Route°° z3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pbgo.string."func(string, http.Handler) *mux.Route"plgo.weak.type.*func(string, net/http.Handler) *"".Route€"runtime.zerovalue €Ztype.func(string, net/http.Handler) *"".RouteРZtype.func(string, net/http.Handler) *"".Route€type.string*type.net/http.Handler type.*"".Routeþ,go.string."HandleFunc"@6 +HandleFunc ,go.string."HandleFunc"þšgo.string."func(string, func(http.ResponseWriter, *http.Request)) *mux.Route"°¤Afunc(string, func(http.ResponseWriter, *http.Request)) *mux.Route šgo.string."func(string, func(http.ResponseWriter, *http.Request)) *mux.Route"þštype.func(string, func(net/http.ResponseWriter, *net/http.Request)) *"".Route°°ÉŠnè3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pšgo.string."func(string, func(http.ResponseWriter, *http.Request)) *mux.Route"p¬go.weak.type.*func(string, func(net/http.ResponseWriter, *net/http.Request)) *"".Route€"runtime.zerovalue €štype.func(string, func(net/http.ResponseWriter, *net/http.Request)) *"".RouteРštype.func(string, func(net/http.ResponseWriter, *net/http.Request)) *"".Route€type.stringjtype.func(net/http.ResponseWriter, *net/http.Request) 
type.*"".Routeþ&go.string."Headers"00Headers &go.string."Headers"þLgo.string."func(...string) *mux.Route"`Vfunc(...string) *mux.Route Lgo.string."func(...string) *mux.Route"þtype.func(*"".Router) *"".Routeð *"".(*Router).NewRoute€ +*"".(*Router).NewRoute + go.string."Path"° +6type.func(string) *"".RouteÀ +Ntype.func(*"".Router, string) *"".RouteÐ +""".(*Router).Pathà +""".(*Router).Pathð +,go.string."PathPrefix" 6type.func(string) *"".Route  Ntype.func(*"".Router, string) *"".Route° ."".(*Router).PathPrefixÀ ."".(*Router).PathPrefixÐ &go.string."Queries"ð type.func(*"".Route) *"".Router  á;è!3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(*mux.Route) *mux.Router"pPgo.weak.type.*func(*"".Route) *"".Router€"runtime.zerovalue €>type.func(*"".Route) *"".RouterÐ>type.func(*"".Route) *"".Router€type.*"".Routetype.*"".Routerþrgo.string."func(*mux.Route, ...string) (*url.URL, error)"€|-func(*mux.Route, ...string) (*url.URL, error) rgo.string."func(*mux.Route, ...string) (*url.URL, error)"þjtype.func(*"".Route, ...string) (*net/url.URL, error)ÀÀÀÐUi3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Prgo.string."func(*mux.Route, ...string) (*url.URL, error)"p|go.weak.type.*func(*"".Route, ...string) (*net/url.URL, error)€"runtime.zerovalue €jtype.func(*"".Route, ...string) (*net/url.URL, error)Рjtype.func(*"".Route, ...string) (*net/url.URL, error)€type.*"".Routetype.[]string "type.*net/url.URL°type.errorþhgo.string."func(*mux.Route, mux.matcher) *mux.Route"€r(func(*mux.Route, mux.matcher) *mux.Route hgo.string."func(*mux.Route, mux.matcher) *mux.Route"þTtype.func(*"".Route, "".matcher) *"".Route°°˜±é3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*mux.Route, mux.matcher) *mux.Route"pfgo.weak.type.*func(*"".Route, "".matcher) *"".Route€"runtime.zerovalue €Ttype.func(*"".Route, "".matcher) *"".RouteРTtype.func(*"".Route, "".matcher) *"".Route€type.*"".Routetype."".matcher 
type.*"".Routeþxgo.string."func(*mux.Route, string, bool, bool, bool) error"‚0func(*mux.Route, string, bool, bool, bool) error xgo.string."func(*mux.Route, string, bool, bool, bool) error"þhtype.func(*"".Route, string, bool, bool, bool) erroràà\7ŒX3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*mux.Route, string, bool, bool, bool) error"pzgo.weak.type.*func(*"".Route, string, bool, bool, bool) error€"runtime.zerovalue €htype.func(*"".Route, string, bool, bool, bool) errorÐÐhtype.func(*"".Route, string, bool, bool, bool) error€type.*"".Routetype.string type.bool°type.boolÀtype.boolÐtype.errorþ‚go.string."func(*mux.Route, map[string]string) map[string]string"Œ5func(*mux.Route, map[string]string) map[string]string ‚go.string."func(*mux.Route, map[string]string) map[string]string"þrtype.func(*"".Route, map[string]string) map[string]string°°ÎWG3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P‚go.string."func(*mux.Route, map[string]string) map[string]string"p„go.weak.type.*func(*"".Route, map[string]string) map[string]string€"runtime.zerovalue €rtype.func(*"".Route, map[string]string) map[string]stringРrtype.func(*"".Route, map[string]string) map[string]string€type.*"".Route,type.map[string]string ,type.map[string]stringþdgo.string."func(*mux.Route) map[string]*mux.Route"pn&func(*mux.Route) map[string]*mux.Route dgo.string."func(*mux.Route) map[string]*mux.Route"þRtype.func(*"".Route) map[string]*"".Route  Xéš 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."func(*mux.Route) map[string]*mux.Route"pdgo.weak.type.*func(*"".Route) map[string]*"".Route€"runtime.zerovalue €Rtype.func(*"".Route) map[string]*"".RouteÐRtype.func(*"".Route) map[string]*"".Route€type.*"".Route2type.map[string]*"".Routeþdgo.string."func(*mux.Route) *mux.routeRegexpGroup"pn&func(*mux.Route) *mux.routeRegexpGroup dgo.string."func(*mux.Route) *mux.routeRegexpGroup"þRtype.func(*"".Route) 
*"".routeRegexpGroup  = +3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pdgo.string."func(*mux.Route) *mux.routeRegexpGroup"pdgo.weak.type.*func(*"".Route) *"".routeRegexpGroup€"runtime.zerovalue €Rtype.func(*"".Route) *"".routeRegexpGroupÐRtype.func(*"".Route) *"".routeRegexpGroup€type.*"".Route2type.*"".routeRegexpGroupþ„go.string."func(*mux.Route, ...string) (map[string]string, error)"Ž6func(*mux.Route, ...string) (map[string]string, error) „go.string."func(*mux.Route, ...string) (map[string]string, error)"þttype.func(*"".Route, ...string) (map[string]string, error)ÀÀØxn3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P„go.string."func(*mux.Route, ...string) (map[string]string, error)"p†go.weak.type.*func(*"".Route, ...string) (map[string]string, error)€"runtime.zerovalue €ttype.func(*"".Route, ...string) (map[string]string, error)Рttype.func(*"".Route, ...string) (map[string]string, error)€type.*"".Routetype.[]string ,type.map[string]string°type.errorþ*go.string."BuildOnly"@4 BuildOnly *go.string."BuildOnly"þ(go.string."GetError"@2GetError (go.string."GetError"þ0go.string."func() error"@: func() error 0go.string."func() error"þ"type.func() errorœ‚Öµ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."func() error"p4go.weak.type.*func() error€"runtime.zerovalue €"type.func() errorЀ"type.func() error€type.errorþ,go.string."GetHandler"@6 +GetHandler ,go.string."GetHandler"þ>go.string."func() http.Handler"PHfunc() http.Handler >go.string."func() http.Handler"þ8type.func() net/http.Handler¥ë53 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func() http.Handler"pJgo.weak.type.*func() net/http.Handler€"runtime.zerovalue €8type.func() net/http.HandlerЀ8type.func() net/http.Handler€*type.net/http.Handlerþ&go.string."GetName"00GetName &go.string."GetName"þ2go.string."func() string"@< func() string 2go.string."func() string"þ$type.func() string¢mË3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P2go.string."func() string"p6go.weak.type.*func() string€"runtime.zerovalue €$type.func() stringЀ$type.func() string€type.stringþRgo.string."func(http.Handler) *mux.Route"`\func(http.Handler) *mux.Route Rgo.string."func(http.Handler) *mux.Route"þJtype.func(net/http.Handler) *"".Route   {vN3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."func(http.Handler) *mux.Route"p\go.weak.type.*func(net/http.Handler) *"".Route€"runtime.zerovalue €Jtype.func(net/http.Handler) *"".RouteÐJtype.func(net/http.Handler) *"".Route€*type.net/http.Handlertype.*"".Routeþ.go.string."HandlerFunc"@8 HandlerFunc .go.string."HandlerFunc"þŠgo.string."func(func(http.ResponseWriter, *http.Request)) *mux.Route" ”9func(func(http.ResponseWriter, *http.Request)) *mux.Route Šgo.string."func(func(http.ResponseWriter, *http.Request)) *mux.Route"þŠtype.func(func(net/http.ResponseWriter, *net/http.Request)) *"".Route  ßì5A3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PŠgo.string."func(func(http.ResponseWriter, *http.Request)) *mux.Route"pœgo.weak.type.*func(func(net/http.ResponseWriter, *net/http.Request)) *"".Route€"runtime.zerovalue €Štype.func(func(net/http.ResponseWriter, *net/http.Request)) *"".RouteЊtype.func(func(net/http.ResponseWriter, *net/http.Request)) *"".Route€jtype.func(net/http.ResponseWriter, *net/http.Request)type.*"".Routeþ2go.string."HeadersRegexp"@< HeadersRegexp 2go.string."HeadersRegexp"þ go.string."Name"0*Name go.string."Name"þ*go.string."Subrouter"@4 Subrouter *go.string."Subrouter"þgo.weak.type.*func() *"".Router€"runtime.zerovalue €,type.func() *"".RouterЀ,type.func() *"".Router€type.*"".Routerþgo.string."URL"0(URL go.string."URL"þZgo.string."func(...string) (*url.URL, error)"pd!func(...string) (*url.URL, error) Zgo.string."func(...string) (*url.URL, error)"þTtype.func(...string) (*net/url.URL, error)°°4IÍ¡3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."func(...string) (*url.URL, error)"pfgo.weak.type.*func(...string) (*net/url.URL, error)€"runtime.zerovalue €Ttype.func(...string) (*net/url.URL, error)ÐTtype.func(...string) (*net/url.URL, error)€type.[]string"type.*net/url.URL type.errorþ&go.string."URLHost"00URLHost &go.string."URLHost"þ&go.string."URLPath"00URLPath &go.string."URLPath"þ,go.string."addMatcher"@6 +addMatcher ,go.string."addMatcher"þPgo.string."func(mux.matcher) *mux.Route"`Zfunc(mux.matcher) *mux.Route Pgo.string."func(mux.matcher) *mux.Route"þ>type.func("".matcher) *"".Route  rÄ…Œ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(mux.matcher) *mux.Route"pPgo.weak.type.*func("".matcher) *"".Route€"runtime.zerovalue €>type.func("".matcher) *"".RouteÐ>type.func("".matcher) *"".Route€type."".matchertype.*"".Routeþ8go.string."addRegexpMatcher"PBaddRegexpMatcher 8go.string."addRegexpMatcher"þ`go.string."func(string, bool, bool, bool) error"pj$func(string, bool, bool, bool) error `go.string."func(string, bool, bool, bool) error"þRtype.func(string, bool, bool, bool) errorÐУýü3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."func(string, bool, bool, bool) error"pdgo.weak.type.*func(string, bool, bool, bool) error€"runtime.zerovalue €Rtype.func(string, bool, bool, bool) errorÐÀRtype.func(string, bool, bool, bool) error€type.stringtype.bool type.bool°type.boolÀtype.errorþ.go.string."prepareVars"@8 prepareVars .go.string."prepareVars"þlgo.string."func(...string) (map[string]string, error)"€v*func(...string) (map[string]string, error) lgo.string."func(...string) (map[string]string, error)"þ^type.func(...string) (map[string]string, error)°°äN–D3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(...string) (map[string]string, error)"ppgo.weak.type.*func(...string) (map[string]string, error)€"runtime.zerovalue 
€^type.func(...string) (map[string]string, error)Ð^type.func(...string) (map[string]string, error)€type.[]string,type.map[string]string type.errorþtype.*"".Routeðða[Ó6´   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*mux.Route"p.go.weak.type.**"".Route€"runtime.zerovaluetype."".Route` type.*"".RouteÀðtype.*"".Routeð*go.string."BuildOnly"*type.func() *"".Route type.func(*"".Route) *"".Routerð*"".(*Route).Subrouter€*"".(*Route).Subroutergo.string."URL"°Ttype.func(...string) (*net/url.URL, error)Àjtype.func(*"".Route, ...string) (*net/url.URL, error)Ð"".(*Route).URLà"".(*Route).URLð&go.string."URLHost"Ttype.func(...string) (*net/url.URL, error) jtype.func(*"".Route, ...string) (*net/url.URL, error)°&"".(*Route).URLHostÀ&"".(*Route).URLHostÐ&go.string."URLPath"ðTtype.func(...string) (*net/url.URL, error)€jtype.func(*"".Route, ...string) (*net/url.URL, error)&"".(*Route).URLPath &"".(*Route).URLPath°,go.string."addMatcher"À"go.importpath."".Ð>type.func("".matcher) *"".RouteàTtype.func(*"".Route, "".matcher) *"".Routeð,"".(*Route).addMatcher€,"".(*Route).addMatcher8go.string."addRegexpMatcher" "go.importpath."".°Rtype.func(string, bool, bool, bool) errorÀhtype.func(*"".Route, string, bool, bool, bool) errorÐ8"".(*Route).addRegexpMatcherà8"".(*Route).addRegexpMatcherð*go.string."buildVars"€"go.importpath."".\type.func(map[string]string) map[string]string rtype.func(*"".Route, map[string]string) map[string]string°*"".(*Route).buildVarsÀ*"".(*Route).buildVarsÐ4go.string."getNamedRoutes"à"go.importpath."".ð@type.func() map[string]*"".Route€Rtype.func(*"".Route) map[string]*"".Route4"".(*Route).getNamedRoutes 4"".(*Route).getNamedRoutes°4go.string."getRegexpGroup"À"go.importpath."".Ð@type.func() *"".routeRegexpGroupàRtype.func(*"".Route) *"".routeRegexpGroupð4"".(*Route).getRegexpGroup€4"".(*Route).getRegexpGroup.go.string."prepareVars" "go.importpath."".°^type.func(...string) (map[string]string, error)Àttype.func(*"".Route, ...string) 
(map[string]string, error)Ð."".(*Route).prepareVarsà."".(*Route).prepareVarsþbruntime.gcbits.0x88888888000000000000000000000000 ˆˆˆˆþ2go.string."[8]*mux.Route"@< [8]*mux.Route 2go.string."[8]*mux.Route"þ"type.[8]*"".RouteÀÀ@OŒ`  runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000P2go.string."[8]*mux.Route"p4go.weak.type.*[8]*"".Route€"runtime.zerovaluetype.*"".Route  type.[]*"".RouteþLgo.typelink.[8]*mux.Route/[8]*"".Route"type.[8]*"".RouteþRgo.string."*map.bucket[string]*mux.Route"`\*map.bucket[string]*mux.Route Rgo.string."*map.bucket[string]*mux.Route"þBtype.*map.bucket[string]*"".Route  ®!Om6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."*map.bucket[string]*mux.Route"pTgo.weak.type.**map.bucket[string]*"".Route€"runtime.zerovalue@type.map.bucket[string]*"".Routeþbruntime.gcbits.0x84848484848484848488888888000000 „„„„„„„„„ˆˆˆˆþPgo.string."map.bucket[string]*mux.Route"`Zmap.bucket[string]*mux.Route Pgo.string."map.bucket[string]*mux.Route"þ@type.map.bucket[string]*"".Route°°ÐŒ‹ùˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000PPgo.string."map.bucket[string]*mux.Route"pRgo.weak.type.*map.bucket[string]*"".Route€"runtime.zerovalueÀ@type.map.bucket[string]*"".RouteÀ go.string."keys"àtype.[8]string$go.string."values"°"type.[8]*"".Routeà(go.string."overflow"€Btype.*map.bucket[string]*"".RouteþJgo.string."map.hdr[string]*mux.Route"`Tmap.hdr[string]*mux.Route Jgo.string."map.hdr[string]*mux.Route"þ:type.map.hdr[string]*"".Routeàà0 ÿ  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PJgo.string."map.hdr[string]*mux.Route"pLgo.weak.type.*map.hdr[string]*"".Route€"runtime.zerovalueÀ:type.map.hdr[string]*"".RouteÀ&go.string."buckets"àBtype.*map.bucket[string]*"".Route,go.string."oldbuckets"°Btype.*map.bucket[string]*"".RouteþBgo.string."map[string]*mux.Route"PLmap[string]*mux.Route Bgo.string."map[string]*mux.Route"þ2type.map[string]*"".RouteÜÜé´UÏ5Ð € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."map[string]*mux.Route"pDgo.weak.type.*map[string]*"".Route€"runtime.zerovaluetype.string type.*"".Route°@type.map.bucket[string]*"".RouteÀ:type.map.hdr[string]*"".Routeþlgo.typelink.map[string]*mux.Route/map[string]*"".Route2type.map[string]*"".Routeþ.go.string."**mux.Route"@8 **mux.Route .go.string."**mux.Route"þtype.**"".Route    6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P.go.string."**mux.Route"p0go.weak.type.***"".Route€"runtime.zerovaluetype.*"".Routeþ^runtime.gcbits.0x000000000000000000000000000000 þ2go.string."[0]*mux.Route"@< [0]*mux.Route 2go.string."[0]*mux.Route"þ"type.[0]*"".RouteÀÀoM–œ‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P2go.string."[0]*mux.Route"p4go.weak.type.*[0]*"".Route€"runtime.zerovaluetype.*"".Route  type.[]*"".RouteþLgo.typelink.[0]*mux.Route/[0]*"".Route"type.[0]*"".Routeþ4go.string."*[0]*mux.Route"@>*[0]*mux.Route 4go.string."*[0]*mux.Route"þ$type.*[0]*"".Route  ––öQ6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."*[0]*mux.Route"p6go.weak.type.**[0]*"".Route€"runtime.zerovalue"type.[0]*"".Routeþ6go.string."*mux.contextKey"@@*mux.contextKey 6go.string."*mux.contextKey"þ&type.*"".contextKey  *é 6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P6go.string."*mux.contextKey"p8go.weak.type.**"".contextKey€"runtime.zerovalue$type."".contextKeyþ4go.string."mux.contextKey"@>mux.contextKey 4go.string."mux.contextKey"þ,go.string."contextKey"@6 +contextKey ,go.string."contextKey"þ$type."".contextKeyààT5M!‚   runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P4go.string."mux.contextKey"p&type.*"".contextKey€"runtime.zerovalue`$type."".contextKey,go.string."contextKey" "go.importpath."".°à$type."".contextKeyþbruntime.gcbits.0xcc000000000000000000000000000000 Ìþ0go.string."interface {}"@: interface {} 0go.string."interface {}"þ"type.interface 
{}ÀÀçW  € runtime.algarray0bruntime.gcbits.0xcc000000000000000000000000000000P0go.string."interface {}"p4go.weak.type.*interface {}€"runtime.zerovalueÀ"type.interface {}þ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þgo.weak.type.*[8]*regexp.Regexp€"runtime.zerovalue&type.*regexp.Regexp *type.[]*regexp.Regexpþ^go.typelink.[8]*regexp.Regexp/[8]*regexp.Regexp,type.[8]*regexp.RegexpþZgo.string."*map.bucket[string]*regexp.Regexp"pd!*map.bucket[string]*regexp.Regexp Zgo.string."*map.bucket[string]*regexp.Regexp"þLtype.*map.bucket[string]*regexp.Regexp   ¯e±6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."*map.bucket[string]*regexp.Regexp"p^go.weak.type.**map.bucket[string]*regexp.Regexp€"runtime.zerovalueJtype.map.bucket[string]*regexp.RegexpþXgo.string."map.bucket[string]*regexp.Regexp"pb map.bucket[string]*regexp.Regexp Xgo.string."map.bucket[string]*regexp.Regexp"þJtype.map.bucket[string]*regexp.Regexp°°Ð5D›RˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000PXgo.string."map.bucket[string]*regexp.Regexp"p\go.weak.type.*map.bucket[string]*regexp.Regexp€"runtime.zerovalueÀJtype.map.bucket[string]*regexp.RegexpÀ go.string."keys"àtype.[8]string$go.string."values"°,type.[8]*regexp.Regexpà(go.string."overflow"€Ltype.*map.bucket[string]*regexp.RegexpþRgo.string."map.hdr[string]*regexp.Regexp"`\map.hdr[string]*regexp.Regexp Rgo.string."map.hdr[string]*regexp.Regexp"þDtype.map.hdr[string]*regexp.Regexpàà0¾÷¿  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PRgo.string."map.hdr[string]*regexp.Regexp"pVgo.weak.type.*map.hdr[string]*regexp.Regexp€"runtime.zerovalueÀDtype.map.hdr[string]*regexp.RegexpÀ&go.string."buckets"àLtype.*map.bucket[string]*regexp.Regexp,go.string."oldbuckets"°Ltype.*map.bucket[string]*regexp.RegexpþJgo.string."map[string]*regexp.Regexp"`Tmap[string]*regexp.Regexp Jgo.string."map[string]*regexp.Regexp"þtype.map.bucket[string][]stringþ,Ftype..gc.map.bucket[string][]string,þNtype..gcprog.map.bucket[string][]string*™™™™Y–eY–e þLgo.string."map.bucket[string][]string"`Vmap.bucket[string][]string Lgo.string."map.bucket[string][]string"þ>type.map.bucket[string][]string°°PúTJ¹YˆH à runtime.algarray0Ftype..gc.map.bucket[string][]string@Ntype..gcprog.map.bucket[string][]stringPLgo.string."map.bucket[string][]string"pPgo.weak.type.*map.bucket[string][]string€"runtime.zerovalueÀ>type.map.bucket[string][]stringÀ go.string."keys"àtype.[8]string$go.string."values"° type.[8][]stringà(go.string."overflow"€@type.*map.bucket[string][]stringþFgo.string."map.hdr[string][]string"PPmap.hdr[string][]string Fgo.string."map.hdr[string][]string"þ8type.map.hdr[string][]stringàà0–‹˜  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PFgo.string."map.hdr[string][]string"pJgo.weak.type.*map.hdr[string][]string€"runtime.zerovalueÀ8type.map.hdr[string][]stringÀ&go.string."buckets"à@type.*map.bucket[string][]string,go.string."oldbuckets"°@type.*map.bucket[string][]stringþ>go.string."map[string][]string"PHmap[string][]string >go.string."map[string][]string"þ0type.map[string][]stringÜÜ'>@5P € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."map[string][]string"pBgo.weak.type.*map[string][]string€"runtime.zerovaluetype.string 
type.[]string°>type.map.bucket[string][]stringÀ8type.map.hdr[string][]stringþfgo.typelink.map[string][]string/map[string][]string0type.map[string][]stringþDgo.string."*map.hdr[string]string"PN*map.hdr[string]string Dgo.string."*map.hdr[string]string"þ6type.*map.hdr[string]string  ºÆ¼6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."*map.hdr[string]string"pHgo.weak.type.**map.hdr[string]string€"runtime.zerovalue4type.map.hdr[string]stringþ*go.string."[]uintptr"@4 []uintptr *go.string."[]uintptr"þtype.[]uintptr  »3À]   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P*go.string."[]uintptr"p.go.weak.type.*[]uintptr€"runtime.zerovaluetype.uintptrþ>go.typelink.[]uintptr/[]uintptrtype.[]uintptrþ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþDgo.string."map.iter[string]string"PNmap.iter[string]string Dgo.string."map.iter[string]string"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."t"0$t go.string."t"þgo.string."h"0$h go.string."h"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þ6type.map.iter[string]stringððP¹…\ (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PDgo.string."map.iter[string]string"pHgo.weak.type.*map.iter[string]string€"runtime.zerovalueÀ6type.map.iter[string]stringÀgo.string."key"àtype.*stringgo.string."val"°type.*stringàgo.string."t"€type.*uint8°go.string."h"Ð6type.*map.hdr[string]string€&go.string."buckets" go.weak.type.**"".headerMatcher€"runtime.zerovalue*type."".headerMatcher` ,type.*"".headerMatcherÀð,type.*"".headerMatcherð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool 
ˆtype.func(*"".headerMatcher, *net/http.Request, *"".RouteMatch) bool°2"".(*headerMatcher).MatchÀ2"".(*headerMatcher).Matchþ:go.string."mux.headerMatcher"PDmux.headerMatcher :go.string."mux.headerMatcher"þgo.string."func(mux.headerMatcher, *http.Request, *mux.RouteMatch) bool" š<func(mux.headerMatcher, *http.Request, *mux.RouteMatch) bool go.string."func(mux.headerMatcher, *http.Request, *mux.RouteMatch) bool"þ†type.func("".headerMatcher, *net/http.Request, *"".RouteMatch) boolÀÀßÛÌ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pgo.string."func(mux.headerMatcher, *http.Request, *mux.RouteMatch) bool"p˜go.weak.type.*func("".headerMatcher, *net/http.Request, *"".RouteMatch) bool€"runtime.zerovalue €†type.func("".headerMatcher, *net/http.Request, *"".RouteMatch) boolа†type.func("".headerMatcher, *net/http.Request, *"".RouteMatch) bool€*type."".headerMatcher,type.*net/http.Request &type.*"".RouteMatch°type.boolþ*type."".headerMatcherñ{K5$ € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P:go.string."mux.headerMatcher"p,type.*"".headerMatcher€"runtime.zerovaluetype.string type.string°:type.map.bucket[string]stringÀ4type.map.hdr[string]string`à*type."".headerMatcherà2go.string."headerMatcher"ð"go.importpath."".€°*type."".headerMatcher°"go.string."Match"Ðbtype.func(*net/http.Request, *"".RouteMatch) boolà†type.func("".headerMatcher, *net/http.Request, *"".RouteMatch) boolð,"".headerMatcher.Match€,"".headerMatcher.MatchþFgo.string."*mux.headerRegexMatcher"PP*mux.headerRegexMatcher Fgo.string."*mux.headerRegexMatcher"þgo.weak.type.**"".methodMatcher€"runtime.zerovalue*type."".methodMatcher` ,type.*"".methodMatcherÀð,type.*"".methodMatcherð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool ˆtype.func(*"".methodMatcher, *net/http.Request, *"".RouteMatch) bool°2"".(*methodMatcher).MatchÀ2"".(*methodMatcher).Matchþ:go.string."mux.methodMatcher"PDmux.methodMatcher 
:go.string."mux.methodMatcher"þgo.string."func(mux.methodMatcher, *http.Request, *mux.RouteMatch) bool" š<func(mux.methodMatcher, *http.Request, *mux.RouteMatch) bool go.string."func(mux.methodMatcher, *http.Request, *mux.RouteMatch) bool"þ†type.func("".methodMatcher, *net/http.Request, *"".RouteMatch) boolÀÀÆã˜ô3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pgo.string."func(mux.methodMatcher, *http.Request, *mux.RouteMatch) bool"p˜go.weak.type.*func("".methodMatcher, *net/http.Request, *"".RouteMatch) bool€"runtime.zerovalue €†type.func("".methodMatcher, *net/http.Request, *"".RouteMatch) boolа†type.func("".methodMatcher, *net/http.Request, *"".RouteMatch) bool€*type."".methodMatcher,type.*net/http.Request &type.*"".RouteMatch°type.boolþ*type."".methodMatcherÐÐdŒ   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P:go.string."mux.methodMatcher"p,type.*"".methodMatcher€"runtime.zerovaluetype.string` *type."".methodMatcher 2go.string."methodMatcher"°"go.importpath."".Àð*type."".methodMatcherð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool †type.func("".methodMatcher, *net/http.Request, *"".RouteMatch) bool°2"".(*methodMatcher).MatchÀ,"".methodMatcher.Matchþgo.weak.type.**"".schemeMatcher€"runtime.zerovalue*type."".schemeMatcher` ,type.*"".schemeMatcherÀð,type.*"".schemeMatcherð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool ˆtype.func(*"".schemeMatcher, *net/http.Request, *"".RouteMatch) bool°2"".(*schemeMatcher).MatchÀ2"".(*schemeMatcher).Matchþ:go.string."mux.schemeMatcher"PDmux.schemeMatcher :go.string."mux.schemeMatcher"þgo.string."func(mux.schemeMatcher, *http.Request, *mux.RouteMatch) bool" š<func(mux.schemeMatcher, *http.Request, *mux.RouteMatch) bool go.string."func(mux.schemeMatcher, *http.Request, *mux.RouteMatch) bool"þ†type.func("".schemeMatcher, *net/http.Request, *"".RouteMatch) boolÀÀ­ÏÈñ3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pgo.string."func(mux.schemeMatcher, *http.Request, *mux.RouteMatch) bool"p˜go.weak.type.*func("".schemeMatcher, *net/http.Request, *"".RouteMatch) bool€"runtime.zerovalue €†type.func("".schemeMatcher, *net/http.Request, *"".RouteMatch) boolа†type.func("".schemeMatcher, *net/http.Request, *"".RouteMatch) bool€*type."".schemeMatcher,type.*net/http.Request &type.*"".RouteMatch°type.boolþ*type."".schemeMatcherÐнò—)   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P:go.string."mux.schemeMatcher"p,type.*"".schemeMatcher€"runtime.zerovaluetype.string` *type."".schemeMatcher 2go.string."schemeMatcher"°"go.importpath."".Àð*type."".schemeMatcherð"go.string."Match"btype.func(*net/http.Request, *"".RouteMatch) bool †type.func("".schemeMatcher, *net/http.Request, *"".RouteMatch) bool°2"".(*schemeMatcher).MatchÀ,"".schemeMatcher.Matchþ,go.string."*[8]string"@6 +*[8]string ,go.string."*[8]string"þtype.*[8]string  ­”o6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[8]string"p0go.weak.type.**[8]string€"runtime.zerovaluetype.[8]stringþ&go.string."runtime"00runtime &go.string."runtime"þ,go.importpath.runtime.  &go.string."runtime"þ&go.string."net/url"00net/url &go.string."net/url"þ,go.importpath.net/url.  &go.string."net/url"þ$go.string."errors"0.errors $go.string."errors"þ*go.importpath.errors.  $go.string."errors"þ(go.string."net/http"@2net/http (go.string."net/http"þ.go.importpath.net/http.  (go.string."net/http"þ&go.string."strconv"00strconv &go.string."strconv"þ,go.importpath.strconv.  &go.string."strconv"þ&go.string."strings"00strings &go.string."strings"þ,go.importpath.strings.  &go.string."strings"þgo.string."fmt"0(fmt go.string."fmt"þ$go.importpath.fmt.  
go.string."fmt"þ¢go.string."github.com/fsouza/go-dockerclient/external/github.com/gorilla/context"°¬Egithub.com/fsouza/go-dockerclient/external/github.com/gorilla/context ¢go.string."github.com/fsouza/go-dockerclient/external/github.com/gorilla/context"þ¨go.importpath.github.com/fsouza/go-dockerclient/external/github.com/gorilla/context. E ¢go.string."github.com/fsouza/go-dockerclient/external/github.com/gorilla/context"þ*go.importpath.regexp.  $go.string."regexp"þ"go.string."bytes"0,bytes "go.string."bytes"þ(go.importpath.bytes.  "go.string."bytes"þ&go.importpath.path.  go.string."path"þ6"".parentRoute.buildVars·f0"".parentRoute.buildVarsþ@"".parentRoute.getNamedRoutes·f:"".parentRoute.getNamedRoutesþ@"".parentRoute.getRegexpGroup·f:"".parentRoute.getRegexpGroupþ.type..hash.[8]string·f(type..hash.[8]stringþ$runtime.strhash·fruntime.strhashþ*type..eq.[8]string·f$type..eq.[8]stringþ&"".matcher.Match·f "".matcher.Matchþ4"".(*MatcherFunc).Match·f."".(*MatcherFunc).Matchþ(runtime.panicwrap·f"runtime.panicwrapþ:type..hash.[1]interface {}·f4type..hash.[1]interface {}þ.runtime.nilinterhash·f(runtime.nilinterhashþ6type..eq.[1]interface {}·f0type..eq.[1]interface {}þ$runtime.efaceeq·fruntime.efaceeqþ:type..hash.[3]interface {}·f4type..hash.[3]interface {}þ6type..eq.[3]interface {}·f0type..eq.[3]interface {}þ:type..hash.[2]interface {}·f4type..hash.[2]interface {}þ6type..eq.[2]interface {}·f0type..eq.[2]interface {}þ8"".(*headerMatcher).Match·f2"".(*headerMatcher).MatchþB"".(*headerRegexMatcher).Match·f<"".(*headerRegexMatcher).Matchþ8"".(*methodMatcher).Match·f2"".(*methodMatcher).Matchþ8"".(*schemeMatcher).Match·f2"".(*schemeMatcher).Matchþ"runtime.zerovalue0ÿÿgo13ld \ No newline at end of file diff --git a/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user.a b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user.a new file 
mode 100644 index 0000000000000000000000000000000000000000..d0d368bcc7eea35b557e35ce7b52054e67d54653 GIT binary patch literal 126996 zcmeFa51brTl`mdhGnq{OkQp&TM8swsFoZCv>i-Q0g9KTJOZiMxBxQ^9D8!S4OJwk+Grbz(yzDBVT&lnBrJ@pc3!l zM=jJD8L5nn#(TuG7EVbADz)zl(o{xk-=ozLyE1fZyhpzHVg`oRBROK|#S?S;sBH}n zB1XK_V#*t<@gDp^$gS2$yuYWX@2ZiJj}DEF4O6e2Qk>g2e_r+WVTYqXicZH-!x#qa zGw}~v=(4EqlK5(CbkzO$=s+nxFjT!HevSL$*yxN(T&Q?*Ubn$P^F&nq&iV7oV?#FA zcIDW}h%;2>N|#;`UwXOQhUM|gMcXd$Bwtmj+&nfcQd3`OhKA6@y^khU={?S!~$O$DwI~Y9Q9v4@7j{Yn&><#cie4R&_&MG+%VG)^-5Wh*KRK8H)F;aH{r-Vb0LA zJU%oqNZ=rc6=>1-5yvWBIanbe#6&nFWy^Muor6T-46qkm5~q}yH_9}Ofi2!%bue!t zV2Cm=2P{!6GDt}Bfu7rN*=UitR)4Me9soop9*S9XmM5pyR~PhEQ7u&OLEVp7L!e9& zY2C6`TO*^6n1-td1|0#d8|`r{pEPuo21oEKPk+Uzy|gKAMVC2_D+Qbrma>!3#j2z2qW%d?M<1>mCXA<&CVK z!LNl$j>cG?5RMi@#{e4af@eBfDTx|%Ow+-IH<^zX8*ohXaZL5Q%fMBe9Gh1ocO|G_ z7Ow+29R-2n#nQ7X28od>OOO^`kbtMx4Lf!XiEH30K&<4Ht+B!CD8cwL%AyJp(SpuW zfqNfK%LP)ztfv)2L}X+HPeP!OkR=#oOI0Gkht}W|&`N5dmmYZ9x%bhGFRJyHUQsFy zR0k?U)*#RGsA#bj2&M5=E3>cKRDHEPk6z;G5(f-Su)5k*wpgW@k(75vqAMI28mMCN zm_HBvB6tgin4y79p4Irh{`EGPlE;MGs1h<*^e_{w-L$@PQ(s1K(e$?&&uu1>`8=ne zHP3zSd`$yCo*XR2x_N0E?X%{CYwfd6mmF=M)j3eXp9*3YF{e#&f>jo8#1Z#d+7bOM zpuwQaCu-UO6uy7HQAdJzQW|Ig~^|aVkdWSovS8AecMIo-A~c;$-?CF-ooZ1 zTGzyWw571^lkw|5wS-fA{NsgtX1ebA6ApeSw|MgG-on(4XyD|HXdvDS^wup3+;(H{ zlEOBW##I;o2>(s(>^)39ttFbOA`$vHOnr<~A2(4sOAC`bd&em$I&4sfb8lFOH~$~4 z>z@0lu5F@M{x+i<-Gyh`5RVzUvQs|0>B;@1#x3P$wmt>amvuD$H1{-t8w!{dn*6|I zpl!EiJaZ^UHUen;c{yp<0)OCS$H5DZ96j1Mzw>nJfTk_$?9enr?l${2Rz}1Q^`@bU zUENegg6lgw_P1$jW7=05E0;g*j8qV_BSv|b01-fx1U*3LRy{JqUPSGAyT2^EGJHF7 z{Q^;%cD`x{Rvcg`GxLc2RvSgCEpZ=54v$o9ME$s}bCITuT+4!$_L^gguq9Pmr*}2rh8`oF*#D1l(XQ&e2 zXjSbEpe1qaN=F8qQEVjQVi}E>!Q=FxDF^0Kp^J$=T}7MxGQ#fbjBMUML`974mU#`D z_Y@){@{P}LknbxT*{cS4*cuwJ2Zl<{CN%TGMi~#s08ZNCb1(zeShZ5!IDC^~7&%ST zm+&kioOm5AkRaPEK}LHb*0_kYcCW54=1Q3SYu%`e_=-{O)tD||=!Q?I`WNB_$+U<3 zc}_>Lm?`a9UGMb*p4d=ol=1gQ659S)ySFkCY--)@{(e${m~*g|n%3UOh4whg2A)^5GKqbt_+Bk}nM;&BPbE#|*nTBp8ej+Q#-+V=P83-m(! 
zc^$XYvva6muc6w-y73cw0$k6UmLqle}8PQ{!XoZ&3b#V0x1vidg)ZL zm^9MqaxP!8ZKIfT(xqallu9P-R56#+JZUp_!m^6xq>YrBd?ITl%uF)nB+I#C+9?}F z0EK=)UqKxjU15(7TO%VYZW~>(zOrI;#9py}pt@nK*k@NZt{5094~}gr6<1W9L1+Dl zRbFvTrQ{5cuDI#8%E-;gXgk3y<y!ML0fub9_FBT?$Ayd#2frVzcAfk73;ekb+ zDkxx4p~4GyKSj-Wa?y#S^k*wB3FLlkpoG%D>8=1$WX7S-BQ%1f?2KLk4#f6=E_4LY zK3|X33Xx{Q@XG)a`SEM9-m|Ce^Rb88<`PoGwD}*oR$s5_^EG;S;XQ37GkM3c?2?m9 zS{c*GW|KxaTPo+Qgq2Q~yzp+7lZdy?tmBwgF;mW$lgUKVD5mYKlQc33jLpdj@5I*D z#GjfrjX9ZU^MQLLTdM)=Jm5~;4E+e)H>h_3y%%x#E#<`U5QX0Rs9~5W`#DB};C?{+Y&59JjCe2JDn|4Y_WE$l}#wj{h(a7bEL>btHR-B~RMWExMl(Rvu z*U)JXa)s>~<hbLj9f%TPbhK;)-#A)IYO?L3aW*sC4gK&w0g`6 zVK`^W`cZOTNZY%)dQ?-7Q6o_pl}u!^DLawQ=aMP2Ty`?Kd@}1;W+~?+GMPm2-vvey zeXWtiHt zF`pQQBp^KOquN6;{Tyxp%V9(lXq6;E3}iH3c?t<{I2TEE8p&a{hqJle?SZcP1ThA; zLJ4BHAvH-R>nx@%4O5c6fI=Uq!MW->zu?pQ`!JQ+PezZ4b`iyr$(u1OywPB(ds9{$2t|opP(Yv|uz`A~b3dg!m zg@R3{X3TOA8bM-&(A_JFf#MgX2SqWk2tp`{Vj>6&iDDw6rek{`^Qa<-T$mD0&_*2x);7vvo? znM|AIL=NiDter7(>7-puT9$2Ch65=i#^K}?#t?HWwu=(t$3e7T^9vuJ4?_GdbeAB0 z22T&fYhZBbM-bns-UR@AH;j8I$)ryjb0Z{=A>zmy4j_6rR~Ntms*-Skssj#C!%hqi z)XQP8F3GK8-G)H>bu0YvywK40He%#{J~Pt(qu?DdlW1>oIiNgITZP zlYWF*E|#4@%%bW7m_=0*W-*4nv6uy(@C0Mlo>0t65oXO^3X=@sHPAFljrNDmqRslPN+Smi>2uO>qpNYXm^HKAN^p(>|sQ zDyA+Z!;a3v6fG=+WKIDC762A-W@T@jUY^uDNVX28o2w@G+VCPpKmN5Vh9e%LS|xtN zC!Frk4UPZxQj|wqnM$QIIg8Xi&~BD6Ww}z$$XgcP4e%U3$yqXyGa$8gZ2G^H1ZR;= zrE}S`)3h1z7ms+fJ(pOn{nE>Y&9C&+bQREJ zp10NNWvzNy$1h<X*ZwqNp`XLP1fZ!SUoLs424{K?TyhQ3UnkQv@}_ zj?06Z#O=WJY}8o=`!CqGoDPf1(>=B=6TQPnj~?|3RDK4l?QRlZObHcTVe@O+SSN%T z6!>Ie>S5OZ6edYOxRTh8G)OAMFT~@*>KP`_lKWT#E@x^LaXu@Fk=fC^kJ3$U?cLAM z3D!j_VTn0IF}%5(Ttytv4r#*u*M+G;eCJ=f^Qr=3h6|hj1$NqxgKH~HnT4qVaMxrM zKKXF(E-tnTOR~w`y?gj$ckehor*`%l)T8{9cX>Lpvv(;4PJWNstySnQC0doA>Dv4; zD*onw>Mq0%6{Z#l9)4@@{aofk^jpb4!QgXe$wD0#%`5MI{T~nYoY!>^*Q$6kvKQYe zGqcI~&faM*Nhu^jVUr3`2TXMFI z|895T=*+==uy?-u^?$nl%ZS%0#uIqOG!`RjQ)2x1!0bv_t=Q4Q{0I7vN}`OW4`F zot~(&K-I~%(1-vGx>YN;`3@qW#$B9Q_EVq;`spU;(+G5C!h^-U3EA2En5-^GhZJV$ z!!xnlWPA-gAqzq)yw!6PyB5gDWbEL3e|_|5#@(A3&Gsf=<((rs;^$_BC7Lh= 
z8|}MpcyNF$_`d?9c1?3lwQ-oscB&h!(G7^(KZo`rr?a+e8QGs`+Lzgy-A#@|w9ZKN zs>B*H$N4qKEkO-rQQvn(wK9NEn5v@?VRk7E(GdC#C__Ef`pWqVo1~|d_4sWg168L8 zqu>FUehw<8K#X`oXyQr=q zPvhOiD(-B;3BBrQyfQR+dwjTpnZih&8;@Hf>&G_2+&NC&{CYPPK)XcI)4rbgz-WA^ zQjJ?G&$*QD2i;WtGSzQPrEOJ3780a?@hw!0Pn3cod<*6A9jI87xC*IX_D8I*@KFFn zyv7kO9QjTNte$Syw59&-W|~a=#je<$?Ldl*_8;4H(J(T8gzztrAVpf@9)@NfB5|%e zXcU=Fk_y__!9Y2%n$+cC0tL{46sC4m*K1~5fmlO(H99txCp~wA>Qgz4M5Fkuc2nb# zN`Vh)k|O2uqV8ySfpa9X1CJ%1QmwDj#~EWE1YJ#U$F`n)+GTil`Fa_*d-dF?lxH|P9C z?eX?Mjh#Dp!6Hq+41@hoG5r?(qp^$ISGKL{_(1F&{R6Qp=1j%rziZyxeA{5NXqU3d zMA}FftxUO`E2nJGH8YpY=8bZ??2{})Mv+Ka*(|7W(y+3bWHLqCP1DRKa!JPqHfZ`! zP8P``83+rJVlQTfZF0ZU69*-KPbVd@TQJROw;n;WMdU?;JPb`p0G2l%*TW*+Vxr_|v81wXS}903K3 z&MXVc1LwCxc7ph1^oWKp&=W5=BhPsfr>lY$tInf&@JevI{`_aVjLb4xGa3n54+mwa z53m3;;vC>}ECoFua@D2F2(B*ZW0~sSN6YBL&!P_rex;ojv=r9GP{Ax87<`$}1(NpI z1H35qkw$0xe#AS5vvA-}0?jsx%F*L)|x~6seSD_5+)B>#w zBuQ!?Y9IlIxzLKpgK?nPUVALh!8ojLfm)SHZgq&>h7{ew!*@j9hgpr}C-0Z(J=6NP zV_(x_)B0QFF}R1s=jC6+se?}aaGTb1U+j~50_WY{*Zyw(Z)34DHT@jzw)Wfgm2Hl0 zws*Ci>7&^x(ArX|lt`sAwpC2oSu<^A@R_tyPz{wVufRE9w2PTc#>ByeayFYzKu%pu z^iWY0W|%E$Iv895wsSaWkt|B^|C>|46Bz-{GxVdME_PZNhU>7 zH^oWv7DCyucLS}g5_FJ7^@DnWwa z8+K8ABfbG%7X;t1TJR0Dw$d+>F~KTJ71Pp7mAb3t{rF(Mp+A~$I7k_RH%LPA4Ikg| z2OPJC#2%{%MFigfG$Bb2dbNk*k*!-|*&W0`REd8WR2~b!L;%Yk?;Yo)k4rw`VaYc< zELnx!Lc#(*VjrhlL#)FN*{M~;fou)ynPM0)uIe?Kk4Sp?2p*Y}l8=~RVM`RH`Ees| z;#c9^#1~k)QqN61z~PO#3BfajaT7mo#7&H|0qE&|ULl(x6P>`q9!O#~WGAwu{|GMi zM0Ux%d-m)^Qd<|oP9!v=j-BwBcE_X7*f(H0_JHYlNXMusrsE0mdHF-kbd0xaJ-aVxH$3k-&wIb;y`bJnqjQI%Pd+Jafd#h> zJipKe2>US7=(N%T5J_r50b+Hk9a<#n1Qq~kCJu_dgyW@?97QuhKEW<^YG9ZI--H)Q z(OEkHM>(Wiz#!}3EdDoQOQvOGaqHn-Qr?w*B|__k;ImuOlHrAbvv*Tmg2M~-UZ zj!+i*;EvGpMIYP|dKY2KX?tFVpw#?Jp%J9E5VTM`=!t=3O%N@= zuOX~@>2bj!gbdu}$Eel#QDHOuM%56kd<=~#Y^%fz+b%^pFoERsd%JVgFd&rvHkS9g+F&lzKK$;rr>c3hDH7T7;aVPHYhQ9 z^a)^cZAkFU(?BFV_VNQFm|=Rd5j+Tns zwfsT&1khT?AB0zAVv7&8WVoV3AYHC7`NP7|r%7NaGRu55gey$`O|72)4l{stWCozd zWDgHkZG#~jg!XfBSe`{QWWQ(uHI#|Mp*8 
zo%X>|&wJYQ{=Z)1CjJPIPr6cvrfS-svzm6DM;}L?h~chi1m1?rl@aOk>mt0uAsD&I zE?HHp2ZKsSbc4>nwz3pcXjyOg1nIwWl#=C^t|D@yb+a=nb(ddd-LA066wVBxVyiIYldG(h+L)25@Cyn><_9awdIzXy66ugCZCB3!!$kqnlq{@if$$O4IrFZmd zec{Ih2_9jUacKFfQ_pvjELR>9HC%69&_ilgPjL}>s+O%|;9EA(|=moWYk(I^^ zA-uWjLs#Q_Ch+|{zPq(r{41(fpaKBC`OhoG6orsZST~y%lzB?d>&%%DBX7+R4F<#ON#Me!vm$K zN%h(oFc8g+9Uhv>qI7H3hN z!0|fOL`FYvd*}CJ_jUrcKR<^bznaI794RZ_qJ8QwVgoRhxTEtgbp4W!v-L+~`layd zrJsKKyE+{GTkY)^T`*_);n;<9&Y$~^xpU7=tj^DknVMe4IaYlDQf3n(6^kB+Z{mkdRPnk{#htsD_C&K(P zzu8PD$N^#vc8N)8r0aN7TTYmXY%^7dnaDI!b(rv$u_D@)O+eRCb7eDo%ZWO7ISsTJ zEtpPh)lZpDwBRDRX6_aQ`xyuwe)}vZA{=$KW;js?qBVH0=T3r#6JOO&8BSEB$7?R`U%t>rEVJ6c}d5L7IAuo|ko={#QdY@TG zsC{-5(Jt{CupBKIPJF||a-1@ph`g1cM)t#zWc(aR>(24nO&kcZo0ujA3=}iF=t&A* zJn2X8`#kUaJ@3zZ-Vdnv6J|Hjv;|NbQ42_`MYY3jVj2&&n_vYLR6u(;UY@-3Xs?>s zO|YhDO4?1ZE)5@06z>%Jt6D))ZO3|NX*aO|f>WgphCRSqkKM%2q2MLOlv0neHN#^x z$WWXq1ugk9Aw8q8oA@KjvN_6bVr_`s#Cn;7?IwiEi!2{dtxyU|O(iShP^~g9;A+VX zx2|)Po9!k#P_nX{(0E{X2p?{kh;I7EAhB+iN2O*r@q1()0K19R1WV)SF8%0zi+Dx% zSHV`1?Iu3JX=`>9uTfG=@L_s->?Y<>x|Z!G3N^b4a-*;8CdOsq)DEg-A3fQIQyl0J zb`!@8hp?OAE^kM%^rJ4{FJ7hH#HC~)(1P8>0m_XA(!i2E1eHk7X&FwXQv5D@vVFwx z?Cc}x0ui>Ah?5BhELPT#84X!F0P0u5h7@KF>oCO%u($0L8eE2p3hPs!#*Cngd-(yy zvCzgU%NlvOoB+F~u;0(krt|TWyX8T5ubG2EFy)##kZoybR$)9#O;#u*E8F}edP9~1 zbi8G@Sq+sHeFxl&@%1z~tk!8xMm6q~_9hbZxF>|;6e9*k+6eSh9}!ct5qOM-SvfnR zbKKj)4FvjyfxzdXBqsv_Mo1{wJE=g_!U#DTX&|tK>y-uqasH470!#U$bprvBSz0#L zh=()~5cQCOK!43ZK-y@j>DnReg3x#_3yY9wgX|RGoI3^hrcNFFebW559sI)oVb%M8 zqyE=f2b@Eu1HwAsn`}X`Q@TySxoqkm21t~-lNYzzfbHzvPw~Li=kcT;y&q8Ta*09p zza$%l_VXyk!EK)R4|(3Z>3#0OU%+5s^=z#JJ{fHtzRv@TdpMW7igwx~u&s5s=Eo=%< zw;6&n0Ja7QRj)tCRseM~x0`kR4fX3Y81sxzD{7{5(JFM4kI6?U-_3M}G_G|`4;0VW z2{l)+n>Z(rL+;TEUNfC&q+mDGhIa3q78K_uPQG*EHRbbK%NL`Q6<uE3DyOuD`0cFN22rpZpr` z3pM@v2o3j|_T4f468`Th{X*!rKdvA0>$c%;xm3*KoPWwTaI!B2KcbEeujS>um4hpP zpOe^B33tn7its~fm6EpMB(k;S*FUf9(r%zRGnl=+weU*IU*gW*)`I%55Q(KYdpT$k4w zz*jj$2@k|3<1tk`^>Idipf`iGoujj}I6V*-`yX)8TQ~t7O@W2*cK*N}$~=6uJ%_+x 
zkUS9Zxk2UbX}2_uSNHMl=$+<_!t>a6ky(10_J|JLSK0unO+ih@fqqhrOAcO0e$`Yw z?F}2c72qBr@4=tZ?$#Bq`28wyclklwB&L9`U2bQdIH3~#ElQ0Ds!yik0pPq7o7g?}(^KnnBa6zTM zwe49Q({UFZMl8U+$!GGHI?0Kz1z-xc&(SXXO54?XTJO^5&8NTm{HOFey&ZGrEIRF7 z_`5{Y4eh4(-_id()^@3Gc4lMmiJj4QuKt$T5}z4cDS^9?O}m^=8Cj!j=F=r3Z`-AG z%1Y;w$&&AOg@j>Sc>~w@mD5HJi8E#mX>PAFcEo)Vo?TTnB%&8hd9%Zh4xduIR1ulnDtlSpfTgVRb1?T~L_BLC{;!pke$YK`P0tg=l}2 zQfy|?{w?qW$jyGycXW>2R}!Bg(r0CQ6!^HPjnCe_or1SjmKL_%K^^~v=ja{hXRku^ zXY*&ewXu*T}T8HSvcO&JBf_R){hw515D?u|_0#C1G?we_&O2 zIf@Nb8aNTsEkJ<0iFltG5xY$uk75DxB~-yPduc`s&wP&`D4d0XsBtNmyHg$>BqqU? z$L^vW7*;f9tJJE1o0Ij+9IgPFbTKVwjiVJj_J9gLPecqOfrt=jN>Q$8q7>zdcDkZm z^l%-ueA`6~UGPAPBEWgL^O;|OSPUzQC1|AzWXt&uDdNQ@HwcNbr!oEf&cL$L?dl-% z6F)`6KxGg=8!td$O8hVtzm#-W#1IpAWKc+sPXSZN=GW2F9n2keP5g|Sy!mlHBL$H7 z8~U931S^2X3Y4$EK$kWw7hE@0isqvsvJdDSnjjctib4|(P#u^fH7(TENqCIih*0S_ zg0FwrEg17gaB>;7M#0H<1)QiEQSh==z)KB53)>X??;T(?n8wSUP-?h3BpL}%TevGo zifn>^ZcwCxyC%iThHhgazw|0vYgP+zhJGxiDKLmCuBAcYR&Hf#Nu44}3Ee0W(I+9e z{tgjX*0ufDPDBu~`r;qc+cSGHyxREdnD*~L_QWxtdyw9Uj158)!H$G?_l{Eq3<`|B z<8I;#CmsOg;dmpH96AO25jXw{5f5O*eIJ})(#3|ibEZ6r4LC9g<7*JpfxU%GAXMe zX@UGXLro@-v_SPyMl@$Nzt)RB3Bg5t43|DjSKUtheFmoRUa7O~cLGQK3GB zUpR@RR3!%Z4*G0_H*1awZ_p2>C7=dGMldOJs2m@iIwmx_pE@K*YMS3qHZ-Df(Yh7@ zoBoKgiE!pJ(&Z9feKAFE&&*)3T-w<+@oRius#Py2U|Rc0pI$wSQs0Ubt)BuE+}wHa zeNbq*1t0nuLA<02wZrsGf1x847(_}s6o>56Q;`o!k~CY02GoIKhml%o7ZLDeu}uau zaSJxN6LJCDM zt(}s5CKi0c``V|M@?HuMSw}fND-%Q!SR!j23lRs9^Ei=n&~%#YNtZxAN8ki@0due1 zNl5jr;KE3Z1!~_V6gVQxU~YBN)yVrba;nK7e-L953Yejm8Vx&VITu1;>({<-)?|z5Q^uj;ikX z{e$bE53ND?Q6-X! 
z1U5k=ELC1S;&ol((H6q;357=+jvtSBg)K;Qa`EU5XI=hH@d!ZsuUZX{z=}vbO5@P{ z?D5Ea?%>D&cMV9s^DDf!9sG-5)`C7v?^kIDO?u8fc;KIFQUA^JemA{q2iM}UBj$?u z3{rJA@=Q9;XZL-w1l$(EZUkLnkoVO5!(r*4j!wUaZ+jA|YOkb3o&683 z0rsD>pf7kz{Q;J^g_fyKslUL&ykN%r*=Kyij1Tas{-DBG_yV9%x(j;mqlQ5u{4cZP z_)xX3uvK`C6D&nGk2hImoUgsYegcB--}kd?fX0*XO{n`LMr?PO*Y@ZA&e_Rnzwp|= zx4EwW2q$sf98lhC+2i}+=IZ_;uRH8N5eDI@H8*0whuk1w3Xh76wR$sp%}Xsx@Lp1n9^uW8|SwAxp3h(2PB zck&_n08#h{K11&tCo#tR-xY}g3R<_a=Tcu@F~&zYq9MR;J(v1kj2>eaxQ?=;k-%_| z_fX6H!~G8&7VfV9{$5HS9`}8YbAef2331DSUW3O$Dxuo@EKhL^Hmfq2l~NydxJf$=^a?-6Z3V^G;10_9<5qIID4& zRIUIdg!xq<1-X=nu)ay)K8gmBCL%*FRtS`DAk{K9U|V*B*}(cFk*L|s1nx%gmxzm} zTc{T&URy~8>gu*G(Xe%N0Y!5YqeKEe%IE4O(P2387_+PK&&02elX48S`jYq+MqFB` z;oVg4@xYZHi3H6{)6q@P9JJCZnuVsNnKT2QeayV7 zxVn)uQ`*tCma0u?&nkl;K^FahX4U}QTcKY&nJ{S3ytLI{h(V(jgV zQr;h(=QL>K=dT62(eiW%aG&ih=6g^~ZbjZ_= z57>m^-|TD!YCT?E=ex#Zn4r8o*NjV!&r;x1`gwagzc>G(PA~>v)cNsfJ3oHZ#*a_% z--B~JA2t~dyi?nu|FrGB?J50j9bNi6yZ-v0K)9CJZOQa`=@Pxa38^u+wm_96Xa z`h~HD3+K#R5NnSuym0Qkg_plIwg~^hx~4thCOvm<*TU20&Rsk&t?Bo{yDq%%T&9oc zW&L@wOT0?ISU2@g!xxW!9Ui}owNJ!4P5rr8J3iaC#!h?t;%~=JyH)>YY|)|E+hEr5 z=P~_O{a1bqM$5`LDO^uf#$85E+OW%(VdrxBoL$V79HW@@dgpOURxxjt%%lO&)X6VuHWPe`lLn(DgoeGqj;x*Fid#YRbIMYE&^SaeNV z8gT#68FgsPs{w8bHRvQk4e1I9VxSo&_#@5mACfN1qn_YHvKS8mk(gyD80hF0bO}76 zIH<)F2}Plr!25ZAl&vhP*v@0im(Rigl|Q5*D#}|;%9XV+x9QsU9O=wZ&=wkhp6U(Q z#Nl4qRZ~|&N207d#O=4MQ1D|&NY74``Si-(Ymj!Ugw_e6pF(MIt`O>BJYjW<UL1 zQDW34)Jm!X8u-}w9T@$tdw#+ds&jT{_fj8fx^iF}+gnIVaz76+-IgK_vvy5<$d_GO zX`^S{50IPZDF+&MfS&alOAcnZIm!uwnwGUZKn+R6e#%G#Drc|`pC_A>h&K%`zs`DT zK=9T}2}0k?s>^!~ddIxrByIHMOOq`&N%PEk4Liy4xVmeXkoYK$+%qzJF?wqs9n7r3 zs~l84(G&23v#*b{9f!LX)Mz@>Xu!i-?vjFhY9xiQ z5ntDYaP}e-krsTAuF8@6YS0U=5Lvjw=)sOANV}7^lkkK=$StD2DeQ zyo=7r#tu598b^a);{lbORAh!=p|%KM+2*mAbaZT2p$QFD#kjQeo`+HC!HpO*C;-o) z2DBag5;VnYT2}+E>x?=>`D<4Ac{M}WAZrI$4;P_X_!Ts95t;=k(jfR0R`l+DtndlV zX8s|vRJhs3y4g@a@DwNUL?1A~z zzvaAZ-)Ifr0P!AOrp)5LJ27q(exKSTUq>=TOhYRcaboj8bksjXPo~uri_R^mYhY+S z%G=3mvgn5Vgf(XFz@ibBZ9Ai*iJQvSXcbo~!o3@xo{cWztE?Y$W%E?T=;*-uA;`xx 
z;b%K2M}LfcUHf*;#wI!oHXw!aHwE5@jF;=Wa_wjSA zK@0EWuiwE!c%ExlQ~brVX+~THidS{M6$-cK!_~lFd1qs=+0PEI-E$onYFs3andW=X zUtw*XSJtR$KV_Yq+YG?2>mmDrbE$>FvWCb3Fs{sgH}?rUK#!v6WdznK7GCk|!{puG zub&qMkT@uULvU+-Lasz3VOt_ka?U6jDI{@l|LpB_pyZYW<&cZDB#MS~tf9yiGO@4e zjWsH8n*D626gZLCj5pN*p)uTM-#ag|%el06v@<#ucY@FLOQrlH0*Bd8bT&E>oWQZ2 z=mF2sP({r~j-<0qRRlP(^Q@AqMZ4QhWZ{)m9k6Yv^2zeWuNg&baqF!!5}>&LA;=O? zL?L@1DUF2?57y{+Ac?8H8a*Gl1g?R=I+4NU=nbbUlOyd%3li%@CaZmSUX!+Fn&=EN ze44!OxU;{*SQT(OJFs{Ml9<}P&62?HdYkH4L)Ae#Pm<;jHWYy?CITXF62h(@C}~f4 zQHVsg2plDU^j>PAq{9z&`lQ2j(t159ECMbLNLX90j184&sD*onVx=;u%}2P!!fxNH z#-izg((hntf3Y(X;#3>%^|!$**X{_HYyaLzGW=XxLQ_`{3)NLk$XqW+Cho1q-zXtp zdtiQ7>gVmBKR)N#`Jmr_HJ2aXYv;#m{``*Vi7SDu#dK3)H2TgSU0 zpw;K-+hQHR3qR>A^*M_c=ojgy>(A;PXLRINq%xP_^!T5}V!a)!=gyt?{`b7Sb8c*C z&ce3SH2ouRH_#5({3OIJ>;GHVzZ}#5KN8jcK@0_1I$X3DV}Bo8@EPBEv~ninluJ1y zpRsViyq(BqN_NUL?Ua?l8&uZbn+8ibWS&nrIB}jyTe(CT2k$aPCzHq)Gr25WCjfO% z!lUOYOlXMURvtmep`@=a^ayWP_D*{xw{$iA0d$=9N8Q-9(2w4ic;4flciQBtG_)<_ zckFnC&__sOI}u(df}m5+Q+JS+_Z^TqZ9ak&I0M}^k)g!c7fz$35Qzu}^H#fq5@VMi#Bv&iU2Gd8<6JE_V0ZMWgP1>Hrdvkn@+crG@Zd zHSZcj&VYB_Gmpk=ii8;uYpnHOt@|CS3vH%i3zK5j4)Z%nHW2Ze{6II?ATw$jraus- zvrDx#uF!UnOuYFp79YL8gHBdeNjM-a_3!@T?V4}eD?J0l25XVduob z^(X0{;oFqwB zENW}u_SVNVB~3Ee@am`o(JA=F_3|!(=p;GbZIIpL=O-}a2x%2PR zAJN<2txxK)F0lF9ZSAKq>63acf{&HdSE4jkd`>-mkA=D&Q3))or#&rhER7X5Kb3n@erSjOXNOCU*(}%+5IHOg|z8Ryui_aRk zsO}MY?T-y>T8AGwaDBzQ{F`C{z@|16YxnT+#P^TEse&-5$Hzy>3ItC2hu2hM!*2nY zjmNb4#dGwchMTaj(=YM^65Jix*>pZ*q)QnqlT2kZb}?Pd=1ZA!!pZx1&r&Akq)jVj zr;26AvT`ZI0np_$rekM{WMBY5I-swh`bSsTqr=w7$co!WSFEqB7#*=!tRJXu7%TSK zm5nO~hRTCun@YtMRcFvyKVp?vTvI7I!=o#1x~(#DGcwvvFiRPCGJe`ZSr?Tm_RY?S zJviVDRaZdtrf+x)+qN zdG5#gKmjhKV4qqFwP>{i3bqZ}lk_)s=rwIWv2g9PnUD@WuF=2wRoLzzuZO2k&0-Fc ziJVb%;F^m5FTqJwF`G)~vSkO}tIRTPCI4-h13=Pm5S_;{W-dC@$8n?c_AqqLMxisz zc>?GRDY)Fr0-c4lEr8Dp_r@X++DX}_nX}-(ES<|FOe1e4Ez2q6&Mh;I`|Eux!@moJ z#+JB00-+0x(C~))nzr4C&@D}g!svxRiq)a?Y-Zu#1xkZYoPnvsx7#CEXe`XMLcbr| zwY9>Zz{>yCY3WQUgf*) 
znWju~k#D@d-!6}fU#gA$E>}-mOR;lBEhv%npRD(s`V=^bmxzO4#z7d5aJ`-yejLt0 zaKpX50Ld|D4t-&M3tYt4S$`SEMd0iqc1Et|O6oOU;>})I`u#C*2HN}p+d;mmd=kMx zd=4v&FWzSzyCNW_pUo24qn|Y%>$?lB`d*Cw67MptYiHco-U9>8_q6}_7{vJ6mtyV3 zn4gKU%jr};nK!a#77{bladO2ZI1=#1xojRc6?(a0a4U(lnYJ_V>SPvkWv65~kiXeZ z*36glNeG)ydTtnf81JNiHCzKr`&v+BwuO*_aqiPD&UH<^z@bW%vj*l^u+F%j@n;qJ zi&^P1&t3OC$iael_5gi7+uSvA7bO**7=T*WlYC|m=k{P%p&q-gAnaPl0wUDR)q5%m z!LDwQ1rpN4VCp~}Br-wY+M!9K1octh? zocxe2A;JjF_Skmp&X)KINs>-0_y|cLK10F*|1se}3S2`Ri&E?*ab*uY<3*_blDM(w zLk(+BKyne#Cj@FIt)j2}Bz!oZjqKq<(BE*{Vbl&7+!crwTeyn9G(|aE;zd$=&vyO) zjoqtbq@NX!UjCR%`@eQ=*-zS+wJ*|Nh;^3q59-}*Z(G>CU@jy#+8yl!vG2y(ulH?8 za@l0AR4(VrIWt`<;%?VOCgIpdISGf#IVbDmA&Yi0YZ^`|VU|ip5zJ;TW58RqowSiR zYw@P=BveRvw}b%HX&8upbxw_*2BZtH#JwJD!GZ#r$WGndP3CRGf|fANS}MudJ{N7b z9U}Sxk^pi-)~&)m4w?HEIr%>2SzVZVxObeBR!MYGa~)ft%&^$~?$`=JV%RW?9l*f| z?j>Z=Tp~h@(+mP=m$T_fqP^4j@^CifkPYk&swa!C{Jt@hXj;EunTX;u?E)zTIU6LK zFUX{QRgC>-$_gP15wOiOehT$AL7}Jx#JX*I61HC52y2Dg`Eew-^9?3ML2hR!OR&R< z&|z>pi~UFpSL6XC4#cP6%hQMCf%qhjqxw-jkRU-+EQ$)HpmS~zYcy^ge3#eJD5=#A z(6(98lBN1thhu*ddo>n29J^aQIzdJD>ss#*=A5H{RKHq3E%ve4yXK!Zuag+E2V?EF ze&sO}kVHC#V=89KO6AiT%S<~-962?tOgWE}EX6b?=%i%GNP91=oTfWLKRmpck9OSs z$~ZdM^$45O!Xmu-4)%@9a(N79oTktff5U`lP8vjvhq;1Q=e2^`?yAL5A3+x^l z$|_Y8X%dM7;17roTjYZba0O&c+fS9!OAa`DmnWAYQn2NUD#bna(WE5bD9K$Pt4E~)_7VyM zQy6m2!_yWtHFONctm$elq51qD(0pF%@@6bBZsh2upOsD$o=TM=XYZq!k+q?rcleZJ z)eW$oleTtSVGSG8$dfMxxW(Mh{2!4~SYM7xkkuY=L&>R#3f;}2^3DQT>?0gTezj$Q z^t_Eje=^|vvaH)jZEJ8)h5m>`yUCx3O!qT3*PV`eREu<=F)k#v7n62z#Hyl|@zL8y ztIo!Fsp5>rhbqfszv!E{a3$-xocs1+X&3FijZaxz;Lk);WUzbxs8TD{Lm>R{k%50{I=JW@{6oL ztt|+e_AoD5K5eY9BHhY*(}vCX3L7PROPk?LA*D?-d)~0LZ?gOGb)E_MDD02o7QDfb zAyRzf*l0Cgbm9X;@ey**5kIeV(RrmF499j>%KBBj-(oFpz;nkdoUmalUPHbRKxL8e zBLC`&>ra{+?cl+`d_v2W3JY%59x*23%VA6ykqDOI|_@Q(!bo%_4^&*=FiaHtN$d1L!z;3 zeXFryn(1<`luj12Y1_=?GpSUnY~-wxk%TCul=q59N>(OSGI9yS$!5z%*djq+n1u8= z4GSeZVUm8K{iI{(PfDd!FB=c`ZLmf+^npmRR!^s^h`!wsxetlcLba4vPvNQB<|P?| zhi*(9Me=GX*LG$C0vXPy44d1*BG8#b&V_{+x!lz^EZ5JebmWT9D%S%t7yRY6Tvv5H 
z)zd$l9Lp)k>gXJNzLvO=%}>Rjlr9es{gryzuACr#1l*5$r`?Ns{~;E0_5LNicelQ< zWZ)K56{<~fGTuT&f>cV4SQ7)^}F2iCfiT7^pO zIs{$Gaw`no?0Y0ekOWx;HpU=Xy zfdngCC&musC&JwzKTd-*jwiC+D1Q?*K$GJiNgV%ZTz*QsykY&xpuV%Vsszk3=yDtX z0z?bVEm%^7qQG4|C^%$I8P?C*4p#o&SZsUj4F3C|_?X8ZZ{fdZ@?Q}m-rq^Ww>emW z@CPf`daNtoD@^Vye0Qd;@SP{@d%tt@s~^z{T@O7^4qcd;#!6LPi7S=6`ATI7 zt-PFU!OJzX&jl*-=!~l5jz6Fk42$4X22rrbI$EbSINBV}Irgv9w1oi-P6NWD_MA9~ z*5!$;!G1lU&z7SSN3xdHW&=Fcx-GGrV(U%=MDM}jn|AzxP2!z;(g)C|SSr{g%IzOm zs@eeR2VnEPuPqXs*9u5psvsG>#Yk(KNhI5Nq8VM!0NU)4CVM1}-=Zqe2uu_SMzWqd6y)??QS%L7DZ+$Fu!}_ffyDv`siPMQ9Hf4u77MY1q{Twb$f?z078dNnCoOHjJtN34 z{XzroCyenK6pRn}B3LH;i&p&pI<5Ayo(VfXuxu+1VJAXuMn0%%s~(S~pd z^qFI{wOvi*-N24F!2j%^I9ddl|M`>2*rEGoLHD6A0uNL#dI*Q>R3o@fM!vw5AE3!Qoz^QoelY_xu zio+|_fsIbz(7+(OosiPrFv3lCyEWFs&$PhF#~VesmEY{Z#GInuv!LFFfN-MTfnjWw z<5!>^!tpal-!&2B_+hIy%Hmeu5%@X&Hi+pu$qWn}KP!LQA#i1TFg|E5ttq-`JAhpP zPQ|+&h)ANunxE2-mHR&cE4w|7~AC#&dX1)i+l=w&4^F{hT5V_Q}N!`SMwqCor9 zvJ#XdrEsf5E6WOTWzD>cC2_?=v`h((=8jXQq}Gi95CoT%K=i3)<@lGCfRC_Q?N*?< z_^n-5QU+<=B6ce&T+s4HFDrqGIkl`D+p-cE##WaV1=^>UmHK5R+rqMvGKec{wydOZ z5oYAFlEMv2Cums-06}nB2}GY-R*rvJ3HU;q)v^MO*l+K$Vj7K>mHZpNtOO?J)UtAH z%SvDvTU}NZXrEeE>XsE;A=rGkk}`=aYr3qMi3ZCGt{6N)%Sr$Ug3C%E`qZ*=EX&HG z8$D-zhAQ!mR@L4>C#>LG3#Xk%>7xT#!CNkPG{0shIk1T$^GE?bP zkxsbdfDB5tjbhHhk@r$51v|P_5qc9H)-+40%yaOM7}qcC*s`4d`P*jUa6B$hGqa9k zT16aQEhm$Sq)~)fo|80SKd&!=2Lz%}c#5B|9ci-3a~#VqIXU?1Ls{9RQO=giIV)kM zlcidII1dZSZ^Bz+A`6#*iA>sr=aY<6bgZJ0%NvO@>UT5@zZR$VtNin*6ki(*$D!Ne z96M(gGiD-%qH<2&gmXbS9JU>^RtC!Av7qw&Wt@n1B;gl4Whc_`S(P%&WhVnW#jInQ zrJR$!&*jru6p}9{%gJIoSHu;6 zStsvUIAIM(eJE3b@);zgGsgcDM0X~Kt~8XBl8C)yQ$Rx4)|-`j9*lxZ=#zT6T398Q?^$-viwqXY@lit z`;=eSK6yRpm4g*>TuJo>>amlhd=j*&?AU43PUbC;G|NcSshc7e0HZdDum|B^AN}>U zHCrwhaj#|8Fw*2OHI>e1OKBh)K%`iNx4c>zZd*ke%Yzfalt9cwhAYD#To1xOo(E1K zry(kLpmo3LS@5o^|E8mRmS=P?#-JLS)+@(S5OsW9EXOjCmWI$9v^*mF@{sIEOWB7! 
zwZmKKsIv+#L*ve-VaKkH#w$aEx5tMoYZCQY0ciAWp(ts=?=?k*hWBYSqTmlr)A>Jhr4FcE@g34*+>`R z76fjIQZ|m`o4DpSZ@}GgQPcZ1t)J>5JfyP2{(<&1^28lNck)HMn8{>t1$Dlh&88C| zAH_t@PG#Xf4)ua31GpNbBO~IiemY*5hWPrB!kCmwr9>)~v8`gtMj2KHpGhl)RiT7M zO_c?=0+=-p4tj)z)c0WX&^UpW&eM#_n-Q#NyE0>fx0L13ZKb!j%WgVz^d zKThPBBpk!cWo^tVSOnZdoWa_dv#~rI<+5$Vm#sS{-83Hf_rMZ+IH=m4|QCf*@I+`;AcgiEr^PzWH5ri(Y-u zi+vTC!7_Nk1l&TF5~VWENoB#;=CK9DwoM~rOl?n$MxS8r!V~<@-@bBI7*+Nc9U1nWh0K8Kr2q7{>oa3`fN!xG|xFx8Z zwbHmUC~I2|h|4alzq{Jrf|h+uw$AhLuGIWaOHW)T@V-+}4a4O7xJbwZM803y~mBa7&q@QR*oZtPZg}q;?m;0+u8 z(y^^A=dBzz#TbeEf#~~Yl?SjtI%E|G9lE}yJOD>O>~iV6TgBZi`Z^DwoQ16Pi_RdyxYg3BB`ce@ zL@=@+6m{2Ft?*XJ7PL|-)n1IQoe_MLc?tiJ0L2GYW6qN=fyRXAm2EqQI;pA6ZwkFt zjMQq?xHB6x4tK%I=Co8UHNYc?bxIH|StT=RfNLvSSu39}L-?6D(-2@`m#yjh!Hw_l zAc(nDHOKoDh+7NUReR;P2mRlAApW!bJr0-Rmvcf9AN)p?jPFtTFW}@LgW{>FZ%A{! zKYe^TXv?^pU&m|xH*%=@5%2x!)A;JFg>Ze|?{RHrfg)%~hVt=U`^}%mJJNq$2C4Gg z?{OL7);|MUfk5}0;yvHf^j-T=f8@qO1o~#zvNBEzd$}?;SWeoo%a#ELjU*Y)l^nX} zLq7o8c_2o!OLXREeZQH1fWc$B?wEI?qMw@f9{Cc$*xF@+{>zdjk{naLtzj+h_oa<` zi_GS>3F!M=ITH@iAepJ@jOwbRCLs~WHRXFn}V`1Q;aPqo9xg+@nfVInRp43O) zs5ZU4k!3~WF&b6Emc6L@SpEU}XL0EoE!;P%#|>If_f`DWKl$$_Aw zD!r;{z9(H+Prt+BA)jihT>9x7{-zgU=>^!kP@OdRpQV4D3&c~s%WwY@*Ys)>S55+) z5kG&sn%(K+<|i`Rb6!tiG>=Ed)AT-}CUqZg-1%(}>nPkZ)ZEw4e^a;Wt}>6{ntpNo}jiCh+*-vQA$j&UX%iaP9GGYjpIsQbaboiNH<6 zxt6O_!Jb=FBQi(a9(rVvd$QQQKQ*ER*NL&4klWC!+CcuHuT5MVk=s3^DU)$tauT?j z)NP{QR}T#062FK7mWHL}QqCgR(Qw^d87W8P^}8cJvS^uV^RP8EU_;p9Y>LR~_O6!F zKl1EYTijP0QG`44o}^p`0pyiqBO}lxHUO5h{i#Gw_`PNq+5m^Kfhc%6+nbhG90S%}rC6E73x(1m~jB9ORx|Gg28F zj>t!&=}E;oDGUSIC>xOlm4x#wM*u@E=V}PJbVqD3DVNcU2s!_a>ZU34CKJ8&%q=`} zNA=d9igVT=;F`#MH3&c&Q6Cy1Cbv;jX(T&$i8;dFy;ZJM3%OF9 zi-y{zkaS;fL{5TdUpmgc7_RF-geyZMGLdI{iWW5~3ZkxQj>zOXXH@ApH_f~Prv}ZG zivo{0-#)BH#hcUx!%tWi zNXaFHUMC;Irp1BqOP7JRI~Vk^O&aH>b;eU_MAL~9dQx%Dj6aMDZn$t5QR%rf7kV~Y zH#-be5lwNS$4$ujL!p^3wTLJu6q?m77?{)ISQ!R<*V)z(8BdKylyXiW8Mh@AUx%wv ztYBXlA|g4LWrzxvE~A#<-scOrNfE7C7MhsLabc9Z=-d)ff(zd=nSX3~UW&I@oob~a 
z!E(p8E(4dhRAe6QTQf4S@jAK{JEAo`bqPg&HDK#;3rtI};wI$$q4QI=qSlen`9X5g z76sM3tPv7K^`$N$=cl!sF^ls@?@h@i5Eb#15YY|EnX25Jlb8a<8KoogdHE^2Hp@eCiCSOV8ns5Ux*9Vr)R^I1 z3DLtVN~MA7Km|9m^FT(C1>&bM9>LUg^=DhCAAY30JtnfZ(Tds-xcDoRab9=J=G@W{ zZC&n8rQ_T#WkszPu4f+|P25zrMyr`9M%Kmtx(r;Ny9iVC=;*-up{Tu-OS5Vzxdc~E zfN*5LJ7*vTLlAzhD+^NPsfz3vOWt=-2Jk=}n}mroFQJW7J5 z4yIiN1u31_KF3%0u^fEx+D|lf#Ote{lmwt`%WG~BX(6DMidLa8Nn^OTR|x%-pSflQ zggMnbvPhf^?Xw2g*D8_Jk%F89v$_z!2x zFxZMI9p$(8zunL}JKZWoMYI3AFy5`BWOu>A(Bm39>voi6&>$YeP{GuIXD4E0V6?BI zZ*#1h`$RWu{nW-|6z|f?Pdm7K&l((~z3q065{}~)bcRo83WnaegpB@%POX^GLSbd0 z0QIf=g9z8t?7)1IEQo)~F2RkL(;1PMS{U~f`$21?0|+rZr%08Gm!f20^*Ms&&_*?) z9Eu7lN93S#&hfMd5?fKsQq@7&aPki^vXr{?!9pO=iztLded5TNw-8QGh4gq@a*bnG zWS3lfDP<$Uak8M0Ji);!R3ZnNqCy5oxU!$cTY{>M6dC|F5g2YEo3ZNxX`7;Zy}g zJb_^?{rX!<DGk%s;L_2Iq|U^&0DNgPOliYt z&;gF|-M0h6tW!_D$rY*D!!d#62RP764wwePimHvusk!aKQ3($5CY6+9e_hh?b{_6} zdV+2!dzh1YD^*B7-l57L*hm+} z4nxmCY!HBLJEDN{!AUx@sNd6BV!4)%DHh~{c(GC;!nIn{-ml#(R#)2A_i)4F0)21{qQgEEos<%Il{8HIYPLe;l=s&Mr@9hf-Y_C~Ndt&A5V zB_;GQJRxHT@Q-GFrV%x+YmoyFYiOj{-Y~^Dqg@b+1nznq(bELi^L?otF;vl9vgD{H zl!1#JdccYYMsN6p3!nha{*Z&_o)i)yBlA-`&t~F!gL|g~F|%Nk>C8R-cFfuS0^m;> zs*Av5_{(f^`)uH+>2OI(-k^63HWcpxn_5pBLQ52*{63UMBL-jZH}`pldPS|=5QRl> zki!84mx49?2>_W0j{cEFYdru0p~je9jejP7bzI)Hc}e^VBfdFL@PhRA=u$`4A(zboKhKqHq5X9rSK z3GZhBZ{EKHC=JW1EHXCtqQfVGBUDXPPg(qNKh*lL7<3uj$85Hr&7e%;)oiDlk?lrI zfCt%{j(SrZMBiGRK=AWS{Of?|00#@At*)O{3|BqC2zE=PmPmui@;k1f@D`A)q-ObC z`O-9>1mo*HVB9oM4>xZWLwVE0U@w@wEAaW9k?v`L`I}e@GV8HmIQ*m^4x5ilQ$Pe0 z*cu@rVDXyhN5Kj#6PX&X3pobRFpmV1S=2#ITG_Dm+!#&w)4;#Ceu_g@$G&1772aYzlOseyz;g2_=1kk6i|cV8XJd6l8gCMTamxn^~?E~zc=w#B&ugTQ>3c!%#_>9 z7Um=%7df)%Vw6P~Aa^04GEjeF3nE^$kR3>1Xlb;)8z-SVVM72hiDWd-KXvHUlS0m8 zWPU1K?0DE5gN%M@OG*C}NbLXf2YcS!xY(d~Je`4UrKhKjgb-p|DJBDQA0BWFbGW#m z@eWuiU;^?2XdykA$-)B9w7QI*(x}xE$XSibO^u9uN?tWES{2)|WfD@fcKJe4vb6df zRE|&kK;R(?s#6b$;SpQAkUUgWa47eq5&PTwWlJ90c;`hYf`d^#Q%}|7<9({#X0vkQ z%@WA?r7fmY$$GJSJTR->BgxkriLp@fYLdc6gArp-9eHSmG&3xl0+7ubRAsw_!|PyUzPK+)e~0e4Fy##liqJuE`l=r!aPq=-9u 
z>|Ggmqa@=j%^OzJB;ibPic@hbo<%zjCykT%-a6l znbZkF#dpCIIi$O@qc=IyV2#c~c@h%9NlQ29mga>>%RF&Ux2%hbkoR|!6@sS`aZSVj z{+}?ei*C6zuZwCERrbj`1G|QrA?aV)-wb^D5!h{Z1OmJ9Aqecoc@fwR)JO*beoQss z>D-@P3k3d>`aV(5Mc`L8Ep#R{VQx3;fs_2I7opv5QD4Il+D(A?an>$0Ft!V9-f4}- zL32rLJ@9pzk8Ue4Us0E>w-j{Umkt_^i~?O$Si|QuLTf zSRxO{p7q{4D(F@dZ~}^j3>r;46{~pt8+=#girKnMjhS-X;7=syt5GRbi$J|#)@hdQ zC^_Bme|UMto0}(V+)KvzoPU-AJ#IQr%4jVxyFxSM;m*AA;Lp6DS_r;}4nNI{ce}mJ zB^hGo>tsH)1aj7+a#P8IE%as)v#lg~`R{y^3`E>Y6qwES`5UsM_Kc0h#{mEB>1hkI z97cf8_hBM?Ai+c!SfJFHDDzNkhcM%~zy=I>Tno)dQy`SlZel;Sz4&{5xi>eqMf8i& zjFw#wF;snQVdRB>7%_e{NEwvmK`f2{M#%xjdZLyT9&Oz5BUj*B2_Em6|CVg_@zi}x zr&ba?5*>ZXL|=}m(D z|0d_wE)7zEY*S7oRm;+MLm)W~XHLW`S`bJPGAVMqOavj2n2M7~&Qu`~-GDSS>zAYt zjoV7q*BkTNzF+~{x6rU0sz{)IU1sbc zuu&hDWk3CQ)0cw!kdO5f@>86R*r+#{HA#YwVPNGxt4aTv4w0JN%U$?~8y#*+j6yeC zf+9uo;ve&IBK|s8)XhgdXA(Yfj0GOpAwD&*Sdpt;{+%YDzBF>RN<^1IRfuDyl+h)i zettjm{bl`cZhpUE4;d~fg$FLYy=?&|LYEKPFDkTN7MNt<^=eIfpPUqz5e+7j6V$jQ znj^u2)o1rlRO-*>Joe_EsAfIoxuQ$2d~@YTC=6;-ByJ6%6h3+MS9J*GOA~@nzIY+y z5(p*mN$Oq%30B~6#((`EFTPO>;J6-b8GfwXpBKIA`WuE`-oCdmTXl*@L2f115m^$$ zNSN?`00%J|!VXFjM2?m$pd9gfB3nN#RNg5j(6bFp&5j8SB&XpFFjj#SjhVneVk*wd zP6VQ7!vqGhhBJY6i5fA1_3WPrsWE{H+4&h@0{vA*F@gS6lJ@wRAb$!$eI)7gM`>~k z+AmwRI`QgVCz1(NR#Np$V4xjZl#2w#Tt^OU7!&9(ocbTi1p1R9Rnzt3_?bX|ohxqe ze}_qW!zYfgz$?6V89b(Q0(9yG{r*G}avGIFwTM1T&PY;GKYx*1IRC?MZvLWi9~n3( ziI2NrzE||O1;_|rMf`k?f372WL4PtiON~pSc~ZP!FuQ-Q7H_#|@Xb9}&3YQn3kG`K z!i+`ZRgZ^*XH5*I%1}3_fJ`Vi8XPC*CnOI|ijYmqgkmO?a_o!ce|ZOH;!SMC9@-}! 
zeiGz+kVW*bg)#4DSr9ZG7)4=rUgOz_#}JCIFe>OBNXlC;E+7{*!cPso)vp&~30_&4 zSUIt>fcoVR%!FsUc^|EO`3Wjo$NKxy$mwrT2DR2ZlGm*tC`CDD;kXcpl6j~o@5usx zL+egksObN{y`u@$>!{wBm~AL3CD-h0l>nQ!j*^Xj+A zh4#LDbLQMLbAIN`nVB*?56J-PVAe)3|$D_x^wC^v?z$t5au1QGY<+%7kg( zx&&Fn1KxkHXKyd44rqei(=}-ozh)`D)*F48~+z+h0~5wF{vrNnNb)>I2ju^G#Z9`Sep*_Z)L&O1w z6&}Aa`wKmK84wo=N1|G28(G{G6cop7ZoBiowReCATe4!eIQw`_+(cpoYKa}tsoS8l zsxu<1=o`c@ugyHXcB4EY#*hp;0l~cAbvO3^!$6D)#C&YLKuiLT-I)Yp5{Q5y2gtI* zQvTA31Q^lTKul6RP9P?!8V6!BpvVivctP&_b-!pJMofXel0c0A6dU%#3fr#6c_^_X zE)e4(Tp-35H4=zfl#e{|wO^oRnb~UGHo=<*ViG!Kn1dsK(Oraj`V$a#8f|!o|yRg z@O2E~&G9ZgDkG!s zfc3r+SzsXT(!s@~rIhgFW$-3mg8%r@V>FW>69Jk5Q7F$Y7tTTUp7K;n2j!zFG-9|w zqeicm7+GF?zS;KLc1%>LL6R@rT^NEiPd)Jjr1a=p{yFt)+==HdI<_!Yj;4xyirJkB z=SlSllhM5im{?U%aHA)dkeQ}9*+p`SxiTPa0!YIq%~f-W=80-#eo@*V`l!Z7Jvg3^ znt)?>CO&Ec5sb{nM@@hco%K?W0}w~!m-SqN&5?EW$svd+858%KLKlrDI9zd|3 zp*jMrCH-?U1UNzy&U3F{Dqhm;8FLpMApsdOX}u z>A{2%qcP#PbPYYN?pgEaOHW>$Z`RaWN<7N;QzotM!K`vfk6qL1g-cXHDvTR8BFq zOwEn=$%^L2gN5eCgRsZ0=Ej$$5xv*9|LPO-Eh66B_|~+(d1h`r94L0xIWIfP>vn7& z$8!OeUIz>F99b6l-;?r3FVXCLRywvj1rcad&h)n_-G{w3`Ny4mZ_T%;Ow~tZM6P-_ zbv#D(5VDEfRG(f&1yIssXkfR|%dXmTu@pAtNL$7xf=@fPKQ^*Xq~iM6umAngW?|u$ z{(Dw$JcGYD`xWS7Ly5CVvZSujRj%D$kYk~?4&10}tip+>JBK~r2|$Iyji?str+B6U z3ZI0X0EeZ+e|qa}+!Z*pod6nF=CDdJUOef^jkt-#gmfnWH@C_G2#1mR!exCusXKZn z03qJbY>A>GuDh}SZ5~$2ibbxM=$P(QYcxx(fe^kH>mzdBO5!T9Y1Ng8KfLwJ7s0g7 z@pjD-?IX_ORx;y!*|zpzZ)%9J)4m88k?URZVP*+zBgB2;q zt6<7J87>rDQ7y!VW4vnC@G2y8J2(6;TiI+6xfTn?xF+JP@K)CKAzDYzjyuo&7XDCm zm0!nyZI>$gu3=Cut-&3s0Z6&R;ky3jcksVw2@i_23o9@KY8IDoD368Z zKAARl@yeO?Y)}G0KdLgM$+SaQQLN7!l?>r{I%s3=JRFK4$krgFRM3a@uv=`xx8b=n z9nQnSrQ02Vpe@x#%pY%!ebzsXTrZ?V|K^`ZFG6QcZ4I_bkLz0*Pi}D9v zFEp-}S7c1IychdP#nIv;H@6CJUj z$&tD(E4OshYz8Mni@Q%p;3BP1+4Uc4w%vnS|GoC^k{|8b{TcA}bYVgXnAsagtK-eq zUAS8936eNWN9!dn>6NOqMZtmH)WyIDo-#s9U!zf@=dd$cdLeBx5RG4;sc9w&`?Ft~ zV4BKlnR8H-kgppd(nh2gZPF|lrn7q0=2;aCwB`8JD~iaD`!%HSnCsv&ZEB1kVVatk z|Mru+Ak^=0jKzHiFA54`=Y&&@aQ`n8()h?6nP$G8W2@U%93?KAx4 
z)U}|sJVnn|liG6OVxcZFjt5;+VFUb`?uzd}K6U#Wz^gfqaTnW0?7;}QVNci29vn&S zedk<65G+@i<(JKcN*tGfz zQ;=0(k?-5;0E^XauFHTZyj#0xRj+ zJ9fONY9AzC)W;l%@0s{41xcxzP?(C06g8cGSS=UT=LyLB*=ghr0zCV$u%h8S{a|bA z+~iG~msp(y^#`gcg9wM3_h|-yP9b7pNq;$i$VlfE5K`8VLf{K8SZta@o%-#p>ilgg3J08T8av(gUU}*dR%fkgR#;e@<|Fb)kD6kW#{M2!tmE8$ zI-x0(mrf<_KK9a^^F5WApg+iQJq3I-j%yzVR23E`f7H=E`Wd-mC0?!^<3UM{S)0W`!*oO#>~BNqmyCv?u6U==0S&`Ty=lPjAqFd#paQ3d7p@lMSxN(W zZtEe!M4qKAp{p#<4p%}P#E47Fcu);XW8%18OdOVtLtKZ;5qIs<>2IGCUp=z~m$)<{ zPU}U)aan)jEH3LZoINiyfYvY5nFy2!Q5ifEgt}uS9H0|o6c3Adh=cd;^X)fqLgA~I z2J3f!e>RE<$TgC57o|Xwgziz32t3BZLiG_Q4`C{IWdQaUpLgH=&hX$<=g#~O+RGn^ literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/testing.a b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/testing.a new file mode 100644 index 000000000..cc08ea8a5 --- /dev/null +++ b/Godeps/_workspace/pkg/darwin_amd64/github.com/fsouza/go-dockerclient/testing.a @@ -0,0 +1,1236 @@ +! +__.PKGDEF 0 0 0 644 123084 ` +go object darwin amd64 go1.4.2 X:precisestack + +$$ +package testing + import net "net" + import rand "crypto/rand" + import docker "github.com/fsouza/go-dockerclient" + import sync "sync" + import runtime "runtime" + import time "time" + import mux "github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux" + import errors "errors" + import http "net/http" + import rand "math/rand" + import strconv "strconv" + import strings "strings" + import stdcopy "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" + import fmt "fmt" + import regexp "regexp" + import tar "archive/tar" + import json "encoding/json" + type @"time".zone struct { @"time".name string; @"time".offset int; @"time".isDST bool } + type @"time".zoneTrans struct { @"time".when int64; @"time".index uint8; @"time".isstd bool; @"time".isutc bool } + type @"time".Location struct { @"time".name string; @"time".zone []@"time".zone; @"time".tx []@"time".zoneTrans; 
@"time".cacheStart int64; @"time".cacheEnd int64; @"time".cacheZone *@"time".zone } + func (@"time".l·2 *@"time".Location "esc:0x0") String () (? string) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".firstZoneUsed () (? bool) + func (@"time".l·2 *@"time".Location "esc:0x2") @"time".get () (? *@"time".Location) + func (@"time".l·6 *@"time".Location "esc:0x1") @"time".lookup (@"time".sec·7 int64) (@"time".name·1 string, @"time".offset·2 int, @"time".isDST·3 bool, @"time".start·4 int64, @"time".end·5 int64) + func (@"time".l·2 *@"time".Location "esc:0x0") @"time".lookupFirstZone () (? int) + func (@"time".l·4 *@"time".Location "esc:0x0") @"time".lookupName (@"time".name·5 string "esc:0x0", @"time".unix·6 int64) (@"time".offset·1 int, @"time".isDST·2 bool, @"time".ok·3 bool) + type @"time".Duration int64 + func (@"time".d·2 @"time".Duration) Hours () (? float64) { var @"time".hour·3 @"time".Duration; ; @"time".hour·3 = @"time".d·2 / @"time".Duration(0x34630B8A000); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x34630B8A000); return float64(@"time".hour·3) + float64(@"time".nsec·4) * 0x9C5FFF26ED75Fp-93 } + func (@"time".d·2 @"time".Duration) Minutes () (? float64) { var @"time".min·3 @"time".Duration; ; @"time".min·3 = @"time".d·2 / @"time".Duration(0xDF8475800); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0xDF8475800); return float64(@"time".min·3) + float64(@"time".nsec·4) * 0x9299FF347E9E9p-87 } + func (@"time".d·2 @"time".Duration) Nanoseconds () (? int64) { return int64(@"time".d·2) } + func (@"time".d·2 @"time".Duration) Seconds () (? float64) { var @"time".sec·3 @"time".Duration; ; @"time".sec·3 = @"time".d·2 / @"time".Duration(0x3B9ACA00); var @"time".nsec·4 @"time".Duration; ; @"time".nsec·4 = @"time".d·2 % @"time".Duration(0x3B9ACA00); return float64(@"time".sec·3) + float64(@"time".nsec·4) * 0x112E0BE826D695p-82 } + func (@"time".d·2 @"time".Duration) String () (? 
string) + type @"time".Month int + func (@"time".m·2 @"time".Month) String () (? string) { return @"time".months[@"time".m·2 - @"time".Month(0x1)] } + type @"time".Weekday int + func (@"time".d·2 @"time".Weekday) String () (? string) { return @"time".days[@"time".d·2] } + type @"time".Time struct { @"time".sec int64; @"time".nsec int32; @"time".loc *@"time".Location } + func (@"time".t·2 @"time".Time "esc:0x2") Add (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") AddDate (@"time".years·3 int, @"time".months·4 int, @"time".days·5 int) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") After (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec > @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec > @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Before (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec < @"time".u·3.@"time".sec || @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec < @"time".u·3.@"time".nsec } + func (@"time".t·4 @"time".Time "esc:0x0") Clock () (@"time".hour·1 int, @"time".min·2 int, @"time".sec·3 int) + func (@"time".t·4 @"time".Time "esc:0x0") Date () (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int) + func (@"time".t·2 @"time".Time "esc:0x0") Day () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Equal (@"time".u·3 @"time".Time "esc:0x0") (? bool) { return @"time".t·2.@"time".sec == @"time".u·3.@"time".sec && @"time".t·2.@"time".nsec == @"time".u·3.@"time".nsec } + func (@"time".t·2 @"time".Time "esc:0x0") Format (@"time".layout·3 string "esc:0x0") (? string) + func (@"time".t·2 *@"time".Time "esc:0x0") GobDecode (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·3 @"time".Time "esc:0x0") GobEncode () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Hour () (? 
int) + func (@"time".t·3 @"time".Time "esc:0x0") ISOWeek () (@"time".year·1 int, @"time".week·2 int) + func (@"time".t·2 @"time".Time "esc:0x2") In (@"time".loc·3 *@"time".Location "esc:0x2") (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") IsZero () (? bool) { return @"time".t·2.@"time".sec == 0x0 && @"time".t·2.@"time".nsec == 0x0 } + func (@"time".t·2 @"time".Time "esc:0x2") Local () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".Local; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x2") Location () (? *@"time".Location) { var @"time".l·3 *@"time".Location; ; @"time".l·3 = @"time".t·2.@"time".loc; if @"time".l·3 == nil { @"time".l·3 = @"time".UTC }; return @"time".l·3 } + func (@"time".t·3 @"time".Time "esc:0x0") MarshalBinary () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"time".t·3 @"time".Time "esc:0x0") MarshalText () (? []byte, ? error) + func (@"time".t·2 @"time".Time "esc:0x0") Minute () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") Month () (? @"time".Month) + func (@"time".t·2 @"time".Time "esc:0x0") Nanosecond () (? int) { return int(@"time".t·2.@"time".nsec) } + func (@"time".t·2 @"time".Time "esc:0x2") Round (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x0") Second () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") String () (? string) + func (@"time".t·2 @"time".Time "esc:0x0") Sub (@"time".u·3 @"time".Time "esc:0x0") (? @"time".Duration) + func (@"time".t·2 @"time".Time "esc:0x2") Truncate (@"time".d·3 @"time".Duration) (? @"time".Time) + func (@"time".t·2 @"time".Time "esc:0x2") UTC () (? @"time".Time) { @"time".t·2.@"time".loc = @"time".UTC; return @"time".t·2 } + func (@"time".t·2 @"time".Time "esc:0x0") Unix () (? int64) { return @"time".t·2.@"time".sec + -0xE7791F700 } + func (@"time".t·2 @"time".Time "esc:0x0") UnixNano () (? 
int64) { return (@"time".t·2.@"time".sec + -0xE7791F700) * 0x3B9ACA00 + int64(@"time".t·2.@"time".nsec) } + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalBinary (@"time".data·3 []byte "esc:0x0") (? error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalJSON (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 *@"time".Time "esc:0x0") UnmarshalText (@"time".data·3 []byte "esc:0x0") (@"time".err·1 error) + func (@"time".t·2 @"time".Time "esc:0x0") Weekday () (? @"time".Weekday) + func (@"time".t·2 @"time".Time "esc:0x0") Year () (? int) + func (@"time".t·2 @"time".Time "esc:0x0") YearDay () (? int) + func (@"time".t·3 @"time".Time "esc:0x0") Zone () (@"time".name·1 string, @"time".offset·2 int) + func (@"time".t·2 @"time".Time "esc:0x0") @"time".abs () (? uint64) + func (@"time".t·5 @"time".Time "esc:0x0") @"time".date (@"time".full·6 bool) (@"time".year·1 int, @"time".month·2 @"time".Month, @"time".day·3 int, @"time".yday·4 int) + func (@"time".t·4 @"time".Time "esc:0x1") @"time".locabs () (@"time".name·1 string, @"time".offset·2 int, @"time".abs·3 uint64) + type @"github.com/fsouza/go-dockerclient".Port string + func (@"github.com/fsouza/go-dockerclient".p·2 @"github.com/fsouza/go-dockerclient".Port "esc:0x0") Port () (? string) + func (@"github.com/fsouza/go-dockerclient".p·2 @"github.com/fsouza/go-dockerclient".Port "esc:0x0") Proto () (? 
string) + type @"github.com/fsouza/go-dockerclient".Config struct { Hostname string "json:\"Hostname,omitempty\" yaml:\"Hostname,omitempty\""; Domainname string "json:\"Domainname,omitempty\" yaml:\"Domainname,omitempty\""; User string "json:\"User,omitempty\" yaml:\"User,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; MemorySwap int64 "json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""; CPUShares int64 "json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""; CPUSet string "json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""; AttachStdin bool "json:\"AttachStdin,omitempty\" yaml:\"AttachStdin,omitempty\""; AttachStdout bool "json:\"AttachStdout,omitempty\" yaml:\"AttachStdout,omitempty\""; AttachStderr bool "json:\"AttachStderr,omitempty\" yaml:\"AttachStderr,omitempty\""; PortSpecs []string "json:\"PortSpecs,omitempty\" yaml:\"PortSpecs,omitempty\""; ExposedPorts map[@"github.com/fsouza/go-dockerclient".Port]struct {} "json:\"ExposedPorts,omitempty\" yaml:\"ExposedPorts,omitempty\""; Tty bool "json:\"Tty,omitempty\" yaml:\"Tty,omitempty\""; OpenStdin bool "json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""; StdinOnce bool "json:\"StdinOnce,omitempty\" yaml:\"StdinOnce,omitempty\""; Env []string "json:\"Env,omitempty\" yaml:\"Env,omitempty\""; Cmd []string "json:\"Cmd\" yaml:\"Cmd\""; DNS []string "json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""; Image string "json:\"Image,omitempty\" yaml:\"Image,omitempty\""; Volumes map[string]struct {} "json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""; VolumesFrom string "json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""; WorkingDir string "json:\"WorkingDir,omitempty\" yaml:\"WorkingDir,omitempty\""; MacAddress string "json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""; Entrypoint []string "json:\"Entrypoint\" yaml:\"Entrypoint\""; NetworkDisabled bool "json:\"NetworkDisabled,omitempty\" yaml:\"NetworkDisabled,omitempty\""; SecurityOpts 
[]string "json:\"SecurityOpts,omitempty\" yaml:\"SecurityOpts,omitempty\""; OnBuild []string "json:\"OnBuild,omitempty\" yaml:\"OnBuild,omitempty\""; Labels map[string]string "json:\"Labels,omitempty\" yaml:\"Labels,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".State struct { Running bool "json:\"Running,omitempty\" yaml:\"Running,omitempty\""; Paused bool "json:\"Paused,omitempty\" yaml:\"Paused,omitempty\""; Restarting bool "json:\"Restarting,omitempty\" yaml:\"Restarting,omitempty\""; OOMKilled bool "json:\"OOMKilled,omitempty\" yaml:\"OOMKilled,omitempty\""; Pid int "json:\"Pid,omitempty\" yaml:\"Pid,omitempty\""; ExitCode int "json:\"ExitCode,omitempty\" yaml:\"ExitCode,omitempty\""; Error string "json:\"Error,omitempty\" yaml:\"Error,omitempty\""; StartedAt @"time".Time "json:\"StartedAt,omitempty\" yaml:\"StartedAt,omitempty\""; FinishedAt @"time".Time "json:\"FinishedAt,omitempty\" yaml:\"FinishedAt,omitempty\"" } + func (@"github.com/fsouza/go-dockerclient".s·2 *@"github.com/fsouza/go-dockerclient".State "esc:0x0") String () (? 
string) + type @"github.com/fsouza/go-dockerclient".SwarmNode struct { ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; IP string "json:\"IP,omitempty\" yaml:\"IP,omitempty\""; Addr string "json:\"Addr,omitempty\" yaml:\"Addr,omitempty\""; Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; CPUs int64 "json:\"CPUs,omitempty\" yaml:\"CPUs,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; Labels map[string]string "json:\"Labels,omitempty\" yaml:\"Labels,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".PortMapping map[string]string + type @"github.com/fsouza/go-dockerclient".PortBinding struct { HostIP string "json:\"HostIP,omitempty\" yaml:\"HostIP,omitempty\""; HostPort string "json:\"HostPort,omitempty\" yaml:\"HostPort,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".APIPort struct { PrivatePort int64 "json:\"PrivatePort,omitempty\" yaml:\"PrivatePort,omitempty\""; PublicPort int64 "json:\"PublicPort,omitempty\" yaml:\"PublicPort,omitempty\""; Type string "json:\"Type,omitempty\" yaml:\"Type,omitempty\""; IP string "json:\"IP,omitempty\" yaml:\"IP,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".NetworkSettings struct { IPAddress string "json:\"IPAddress,omitempty\" yaml:\"IPAddress,omitempty\""; IPPrefixLen int "json:\"IPPrefixLen,omitempty\" yaml:\"IPPrefixLen,omitempty\""; MacAddress string "json:\"MacAddress,omitempty\" yaml:\"MacAddress,omitempty\""; Gateway string "json:\"Gateway,omitempty\" yaml:\"Gateway,omitempty\""; Bridge string "json:\"Bridge,omitempty\" yaml:\"Bridge,omitempty\""; PortMapping map[string]@"github.com/fsouza/go-dockerclient".PortMapping "json:\"PortMapping,omitempty\" yaml:\"PortMapping,omitempty\""; Ports map[@"github.com/fsouza/go-dockerclient".Port][]@"github.com/fsouza/go-dockerclient".PortBinding "json:\"Ports,omitempty\" yaml:\"Ports,omitempty\""; NetworkID string "json:\"NetworkID,omitempty\" yaml:\"NetworkID,omitempty\""; EndpointID string 
"json:\"EndpointID,omitempty\" yaml:\"EndpointID,omitempty\""; SandboxKey string "json:\"SandboxKey,omitempty\" yaml:\"SandboxKey,omitempty\""; GlobalIPv6Address string "json:\"GlobalIPv6Address,omitempty\" yaml:\"GlobalIPv6Address,omitempty\""; GlobalIPv6PrefixLen int "json:\"GlobalIPv6PrefixLen,omitempty\" yaml:\"GlobalIPv6PrefixLen,omitempty\""; IPv6Gateway string "json:\"IPv6Gateway,omitempty\" yaml:\"IPv6Gateway,omitempty\""; LinkLocalIPv6Address string "json:\"LinkLocalIPv6Address,omitempty\" yaml:\"LinkLocalIPv6Address,omitempty\""; LinkLocalIPv6PrefixLen int "json:\"LinkLocalIPv6PrefixLen,omitempty\" yaml:\"LinkLocalIPv6PrefixLen,omitempty\""; SecondaryIPAddresses []string "json:\"SecondaryIPAddresses,omitempty\" yaml:\"SecondaryIPAddresses,omitempty\""; SecondaryIPv6Addresses []string "json:\"SecondaryIPv6Addresses,omitempty\" yaml:\"SecondaryIPv6Addresses,omitempty\"" } + func (@"github.com/fsouza/go-dockerclient".settings·2 *@"github.com/fsouza/go-dockerclient".NetworkSettings "esc:0x0") PortMappingAPI () (? 
[]@"github.com/fsouza/go-dockerclient".APIPort) + type @"github.com/fsouza/go-dockerclient".KeyValuePair struct { Key string "json:\"Key,omitempty\" yaml:\"Key,omitempty\""; Value string "json:\"Value,omitempty\" yaml:\"Value,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".RestartPolicy struct { Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; MaximumRetryCount int "json:\"MaximumRetryCount,omitempty\" yaml:\"MaximumRetryCount,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".Device struct { PathOnHost string "json:\"PathOnHost,omitempty\" yaml:\"PathOnHost,omitempty\""; PathInContainer string "json:\"PathInContainer,omitempty\" yaml:\"PathInContainer,omitempty\""; CgroupPermissions string "json:\"CgroupPermissions,omitempty\" yaml:\"CgroupPermissions,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".LogConfig struct { Type string "json:\"Type,omitempty\" yaml:\"Type,omitempty\""; Config map[string]string "json:\"Config,omitempty\" yaml:\"Config,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".ULimit struct { Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; Soft int64 "json:\"Soft,omitempty\" yaml:\"Soft,omitempty\""; Hard int64 "json:\"Hard,omitempty\" yaml:\"Hard,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".HostConfig struct { Binds []string "json:\"Binds,omitempty\" yaml:\"Binds,omitempty\""; CapAdd []string "json:\"CapAdd,omitempty\" yaml:\"CapAdd,omitempty\""; CapDrop []string "json:\"CapDrop,omitempty\" yaml:\"CapDrop,omitempty\""; ContainerIDFile string "json:\"ContainerIDFile,omitempty\" yaml:\"ContainerIDFile,omitempty\""; LxcConf []@"github.com/fsouza/go-dockerclient".KeyValuePair "json:\"LxcConf,omitempty\" yaml:\"LxcConf,omitempty\""; Privileged bool "json:\"Privileged,omitempty\" yaml:\"Privileged,omitempty\""; PortBindings map[@"github.com/fsouza/go-dockerclient".Port][]@"github.com/fsouza/go-dockerclient".PortBinding "json:\"PortBindings,omitempty\" 
yaml:\"PortBindings,omitempty\""; Links []string "json:\"Links,omitempty\" yaml:\"Links,omitempty\""; PublishAllPorts bool "json:\"PublishAllPorts,omitempty\" yaml:\"PublishAllPorts,omitempty\""; DNS []string "json:\"Dns,omitempty\" yaml:\"Dns,omitempty\""; DNSSearch []string "json:\"DnsSearch,omitempty\" yaml:\"DnsSearch,omitempty\""; ExtraHosts []string "json:\"ExtraHosts,omitempty\" yaml:\"ExtraHosts,omitempty\""; VolumesFrom []string "json:\"VolumesFrom,omitempty\" yaml:\"VolumesFrom,omitempty\""; NetworkMode string "json:\"NetworkMode,omitempty\" yaml:\"NetworkMode,omitempty\""; IpcMode string "json:\"IpcMode,omitempty\" yaml:\"IpcMode,omitempty\""; PidMode string "json:\"PidMode,omitempty\" yaml:\"PidMode,omitempty\""; UTSMode string "json:\"UTSMode,omitempty\" yaml:\"UTSMode,omitempty\""; RestartPolicy @"github.com/fsouza/go-dockerclient".RestartPolicy "json:\"RestartPolicy,omitempty\" yaml:\"RestartPolicy,omitempty\""; Devices []@"github.com/fsouza/go-dockerclient".Device "json:\"Devices,omitempty\" yaml:\"Devices,omitempty\""; LogConfig @"github.com/fsouza/go-dockerclient".LogConfig "json:\"LogConfig,omitempty\" yaml:\"LogConfig,omitempty\""; ReadonlyRootfs bool "json:\"ReadonlyRootfs,omitempty\" yaml:\"ReadonlyRootfs,omitempty\""; SecurityOpt []string "json:\"SecurityOpt,omitempty\" yaml:\"SecurityOpt,omitempty\""; CgroupParent string "json:\"CgroupParent,omitempty\" yaml:\"CgroupParent,omitempty\""; Memory int64 "json:\"Memory,omitempty\" yaml:\"Memory,omitempty\""; MemorySwap int64 "json:\"MemorySwap,omitempty\" yaml:\"MemorySwap,omitempty\""; CPUShares int64 "json:\"CpuShares,omitempty\" yaml:\"CpuShares,omitempty\""; CPUSet string "json:\"Cpuset,omitempty\" yaml:\"Cpuset,omitempty\""; CPUQuota int64 "json:\"CpuQuota,omitempty\" yaml:\"CpuQuota,omitempty\""; CPUPeriod int64 "json:\"CpuPeriod,omitempty\" yaml:\"CpuPeriod,omitempty\""; Ulimits []@"github.com/fsouza/go-dockerclient".ULimit "json:\"Ulimits,omitempty\" yaml:\"Ulimits,omitempty\"" } + type 
@"github.com/fsouza/go-dockerclient".Container struct { ID string "json:\"Id\" yaml:\"Id\""; Created @"time".Time "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Path string "json:\"Path,omitempty\" yaml:\"Path,omitempty\""; Args []string "json:\"Args,omitempty\" yaml:\"Args,omitempty\""; Config *@"github.com/fsouza/go-dockerclient".Config "json:\"Config,omitempty\" yaml:\"Config,omitempty\""; State @"github.com/fsouza/go-dockerclient".State "json:\"State,omitempty\" yaml:\"State,omitempty\""; Image string "json:\"Image,omitempty\" yaml:\"Image,omitempty\""; Node *@"github.com/fsouza/go-dockerclient".SwarmNode "json:\"Node,omitempty\" yaml:\"Node,omitempty\""; NetworkSettings *@"github.com/fsouza/go-dockerclient".NetworkSettings "json:\"NetworkSettings,omitempty\" yaml:\"NetworkSettings,omitempty\""; SysInitPath string "json:\"SysInitPath,omitempty\" yaml:\"SysInitPath,omitempty\""; ResolvConfPath string "json:\"ResolvConfPath,omitempty\" yaml:\"ResolvConfPath,omitempty\""; HostnamePath string "json:\"HostnamePath,omitempty\" yaml:\"HostnamePath,omitempty\""; HostsPath string "json:\"HostsPath,omitempty\" yaml:\"HostsPath,omitempty\""; LogPath string "json:\"LogPath,omitempty\" yaml:\"LogPath,omitempty\""; Name string "json:\"Name,omitempty\" yaml:\"Name,omitempty\""; Driver string "json:\"Driver,omitempty\" yaml:\"Driver,omitempty\""; Volumes map[string]string "json:\"Volumes,omitempty\" yaml:\"Volumes,omitempty\""; VolumesRW map[string]bool "json:\"VolumesRW,omitempty\" yaml:\"VolumesRW,omitempty\""; HostConfig *@"github.com/fsouza/go-dockerclient".HostConfig "json:\"HostConfig,omitempty\" yaml:\"HostConfig,omitempty\""; ExecIDs []string "json:\"ExecIDs,omitempty\" yaml:\"ExecIDs,omitempty\""; RestartCount int "json:\"RestartCount,omitempty\" yaml:\"RestartCount,omitempty\""; AppArmorProfile string "json:\"AppArmorProfile,omitempty\" yaml:\"AppArmorProfile,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".ExecProcessConfig struct { Privileged 
bool "json:\"privileged,omitempty\" yaml:\"privileged,omitempty\""; User string "json:\"user,omitempty\" yaml:\"user,omitempty\""; Tty bool "json:\"tty,omitempty\" yaml:\"tty,omitempty\""; EntryPoint string "json:\"entrypoint,omitempty\" yaml:\"entrypoint,omitempty\""; Arguments []string "json:\"arguments,omitempty\" yaml:\"arguments,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".ExecInspect struct { ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; Running bool "json:\"Running,omitempty\" yaml:\"Running,omitempty\""; ExitCode int "json:\"ExitCode,omitempty\" yaml:\"ExitCode,omitempty\""; OpenStdin bool "json:\"OpenStdin,omitempty\" yaml:\"OpenStdin,omitempty\""; OpenStderr bool "json:\"OpenStderr,omitempty\" yaml:\"OpenStderr,omitempty\""; OpenStdout bool "json:\"OpenStdout,omitempty\" yaml:\"OpenStdout,omitempty\""; ProcessConfig @"github.com/fsouza/go-dockerclient".ExecProcessConfig "json:\"ProcessConfig,omitempty\" yaml:\"ProcessConfig,omitempty\""; Container @"github.com/fsouza/go-dockerclient".Container "json:\"Container,omitempty\" yaml:\"Container,omitempty\"" } + type @"sync".Mutex struct { @"sync".state int32; @"sync".sema uint32 } + func (@"sync".m·1 *@"sync".Mutex) Lock () + func (@"sync".m·1 *@"sync".Mutex) Unlock () + type @"sync".Locker interface { Lock(); Unlock() } + type @"sync".RWMutex struct { @"sync".w @"sync".Mutex; @"sync".writerSem uint32; @"sync".readerSem uint32; @"sync".readerCount int32; @"sync".readerWait int32 } + func (@"sync".rw·1 *@"sync".RWMutex) Lock () + func (@"sync".rw·1 *@"sync".RWMutex) RLock () + func (@"sync".rw·2 *@"sync".RWMutex "esc:0x2") RLocker () (? 
@"sync".Locker) { return (*@"sync".rlocker)(@"sync".rw·2) } + func (@"sync".rw·1 *@"sync".RWMutex) RUnlock () + func (@"sync".rw·1 *@"sync".RWMutex) Unlock () + type @"github.com/fsouza/go-dockerclient".Image struct { ID string "json:\"Id\" yaml:\"Id\""; Parent string "json:\"Parent,omitempty\" yaml:\"Parent,omitempty\""; Comment string "json:\"Comment,omitempty\" yaml:\"Comment,omitempty\""; Created @"time".Time "json:\"Created,omitempty\" yaml:\"Created,omitempty\""; Container string "json:\"Container,omitempty\" yaml:\"Container,omitempty\""; ContainerConfig @"github.com/fsouza/go-dockerclient".Config "json:\"ContainerConfig,omitempty\" yaml:\"ContainerConfig,omitempty\""; DockerVersion string "json:\"DockerVersion,omitempty\" yaml:\"DockerVersion,omitempty\""; Author string "json:\"Author,omitempty\" yaml:\"Author,omitempty\""; Config *@"github.com/fsouza/go-dockerclient".Config "json:\"Config,omitempty\" yaml:\"Config,omitempty\""; Architecture string "json:\"Architecture,omitempty\" yaml:\"Architecture,omitempty\""; Size int64 "json:\"Size,omitempty\" yaml:\"Size,omitempty\""; VirtualSize int64 "json:\"VirtualSize,omitempty\" yaml:\"VirtualSize,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".Endpoint struct { Name string "json:\"name\""; ID string "json:\"id\""; Network string "json:\"network\"" } + type @"github.com/fsouza/go-dockerclient".Network struct { Name string "json:\"name\""; ID string "json:\"id\""; Type string "json:\"type\""; Endpoints []*@"github.com/fsouza/go-dockerclient".Endpoint "json:\"endpoints\"" } + type @"net".Addr interface { Network() (? string); String() (? string) } + type @"net".Conn interface { Close() (? error); LocalAddr() (? @"net".Addr); Read(@"net".b []byte) (@"net".n int, @"net".err error); RemoteAddr() (? @"net".Addr); SetDeadline(@"net".t @"time".Time) (? error); SetReadDeadline(@"net".t @"time".Time) (? error); SetWriteDeadline(@"net".t @"time".Time) (? 
error); Write(@"net".b []byte) (@"net".n int, @"net".err error) } + type @"net".Listener interface { Accept() (@"net".c @"net".Conn, @"net".err error); Addr() (? @"net".Addr); Close() (? error) } + import io "io" // indirect + type @"io".Writer interface { Write(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"net/http".keyValues struct { @"net/http".key string; @"net/http".values []string } + type @"net/http".headerSorter struct { @"net/http".kvs []@"net/http".keyValues } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Len () (? int) { return len(@"net/http".s·2.@"net/http".kvs) } + func (@"net/http".s·2 *@"net/http".headerSorter "esc:0x0") Less (@"net/http".i·3 int, @"net/http".j·4 int) (? bool) { return @"net/http".s·2.@"net/http".kvs[@"net/http".i·3].@"net/http".key < @"net/http".s·2.@"net/http".kvs[@"net/http".j·4].@"net/http".key } + func (@"net/http".s·1 *@"net/http".headerSorter "esc:0x0") Swap (@"net/http".i·2 int, @"net/http".j·3 int) { @"net/http".s·1.@"net/http".kvs[@"net/http".i·2], @"net/http".s·1.@"net/http".kvs[@"net/http".j·3] = @"net/http".s·1.@"net/http".kvs[@"net/http".j·3], @"net/http".s·1.@"net/http".kvs[@"net/http".i·2] } + type @"net/http".Header map[string][]string + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Add (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Del (@"net/http".key·2 string "esc:0x0") + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Get (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".h·1 @"net/http".Header "esc:0x0") Set (@"net/http".key·2 string, @"net/http".value·3 string) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") WriteSubset (@"net/http".w·3 @"io".Writer, @"net/http".exclude·4 map[string]bool "esc:0x0") (? error) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".clone () (? 
@"net/http".Header) + func (@"net/http".h·2 @"net/http".Header "esc:0x0") @"net/http".get (@"net/http".key·3 string "esc:0x0") (? string) { var @"net/http".v·4 []string; ; @"net/http".v·4 = @"net/http".h·2[@"net/http".key·3]; if len(@"net/http".v·4) > 0x0 { return @"net/http".v·4[0x0] }; return "" } + func (@"net/http".h·3 @"net/http".Header "esc:0x0") @"net/http".sortedKeyValues (@"net/http".exclude·4 map[string]bool "esc:0x0") (@"net/http".kvs·1 []@"net/http".keyValues, @"net/http".hs·2 *@"net/http".headerSorter) + type @"net/http".ResponseWriter interface { Header() (? @"net/http".Header); Write(? []byte) (? int, ? error); WriteHeader(? int) } + import url "net/url" // indirect + type @"net/url".Userinfo struct { @"net/url".username string; @"net/url".password string; @"net/url".passwordSet bool } + func (@"net/url".u·3 *@"net/url".Userinfo "esc:0x1") Password () (? string, ? bool) { if @"net/url".u·3.@"net/url".passwordSet { return @"net/url".u·3.@"net/url".password, true }; return "", false } + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") String () (? string) + func (@"net/url".u·2 *@"net/url".Userinfo "esc:0x1") Username () (? string) { return @"net/url".u·2.@"net/url".username } + type @"net/url".Values map[string][]string + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Add (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = append(@"net/url".v·1[@"net/url".key·2], @"net/url".value·3) } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Del (@"net/url".key·2 string "esc:0x0") { delete(@"net/url".v·1, @"net/url".key·2) } + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Encode () (? string) + func (@"net/url".v·2 @"net/url".Values "esc:0x0") Get (@"net/url".key·3 string "esc:0x0") (? 
string) { if @"net/url".v·2 == nil { return "" }; var @"net/url".vs·4 []string; ; var @"net/url".ok·5 bool; ; @"net/url".vs·4, @"net/url".ok·5 = @"net/url".v·2[@"net/url".key·3]; if !@"net/url".ok·5 || len(@"net/url".vs·4) == 0x0 { return "" }; return @"net/url".vs·4[0x0] } + func (@"net/url".v·1 @"net/url".Values "esc:0x0") Set (@"net/url".key·2 string, @"net/url".value·3 string) { @"net/url".v·1[@"net/url".key·2] = ([]string{ 0x0:@"net/url".value·3 }) } + type @"net/url".URL struct { Scheme string; Opaque string; User *@"net/url".Userinfo; Host string; Path string; RawQuery string; Fragment string } + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") IsAbs () (? bool) { return @"net/url".u·2.Scheme != "" } + func (@"net/url".u·3 *@"net/url".URL "esc:0x2") Parse (@"net/url".ref·4 string) (? *@"net/url".URL, ? error) + func (@"net/url".u·2 *@"net/url".URL) Query () (? @"net/url".Values) + func (@"net/url".u·2 *@"net/url".URL "esc:0x1") RequestURI () (? string) + func (@"net/url".u·2 *@"net/url".URL "esc:0x2") ResolveReference (@"net/url".ref·3 *@"net/url".URL "esc:0x2") (? *@"net/url".URL) + func (@"net/url".u·2 *@"net/url".URL "esc:0x0") String () (? string) + type @"io".ReadCloser interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error) } + import multipart "mime/multipart" // indirect + import textproto "net/textproto" // indirect + type @"net/textproto".MIMEHeader map[string][]string + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Add (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Del (@"net/textproto".key·2 string "esc:0x0") + func (@"net/textproto".h·2 @"net/textproto".MIMEHeader "esc:0x0") Get (@"net/textproto".key·3 string "esc:0x0") (? 
string) + func (@"net/textproto".h·1 @"net/textproto".MIMEHeader "esc:0x0") Set (@"net/textproto".key·2 string, @"net/textproto".value·3 string) + type @"mime/multipart".File interface { Close() (? error); Read(@"io".p []byte) (@"io".n int, @"io".err error); ReadAt(@"io".p []byte, @"io".off int64) (@"io".n int, @"io".err error); Seek(@"io".offset int64, @"io".whence int) (? int64, ? error) } + type @"mime/multipart".FileHeader struct { Filename string; Header @"net/textproto".MIMEHeader; @"mime/multipart".content []byte; @"mime/multipart".tmpfile string } + func (@"mime/multipart".fh·3 *@"mime/multipart".FileHeader) Open () (? @"mime/multipart".File, ? error) + type @"mime/multipart".Form struct { Value map[string][]string; File map[string][]*@"mime/multipart".FileHeader } + func (@"mime/multipart".f·2 *@"mime/multipart".Form "esc:0x0") RemoveAll () (? error) + import tls "crypto/tls" // indirect + import x509 "crypto/x509" // indirect + type @"crypto/x509".SignatureAlgorithm int + type @"crypto/x509".PublicKeyAlgorithm int + import big "math/big" // indirect + type @"math/big".Word uintptr + type @"math/big".divisor struct { @"math/big".bbb @"math/big".nat; @"math/big".nbits int; @"math/big".ndigits int } + type @"math/rand".Source interface { Int63() (? int64); Seed(@"math/rand".seed int64) } + type @"math/rand".Rand struct { @"math/rand".src @"math/rand".Source } + func (@"math/rand".r·2 *@"math/rand".Rand) ExpFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Float32 () (? float32) + func (@"math/rand".r·2 *@"math/rand".Rand) Float64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int () (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31 () (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int31n (@"math/rand".n·3 int32) (? int32) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63 () (? int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Int63n (@"math/rand".n·3 int64) (? 
int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Intn (@"math/rand".n·3 int) (? int) + func (@"math/rand".r·2 *@"math/rand".Rand) NormFloat64 () (? float64) + func (@"math/rand".r·2 *@"math/rand".Rand) Perm (@"math/rand".n·3 int) (? []int) + func (@"math/rand".r·1 *@"math/rand".Rand) Seed (@"math/rand".seed·2 int64) + func (@"math/rand".r·2 *@"math/rand".Rand) Uint32 () (? uint32) + type @"io".RuneScanner interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error); UnreadRune() (? error) } + type @"math/big".nat []@"math/big".Word + func (@"math/big".z·2 @"math/big".nat) @"math/big".add (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".and (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".andNot (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bit (@"math/big".i·3 uint) (? uint) { var @"math/big".j·4 int; ; @"math/big".j·4 = int(@"math/big".i·3 / 0x40); if @"math/big".j·4 >= len(@"math/big".z·2) { return 0x0 }; return uint(@"math/big".z·2[@"math/big".j·4] >> (@"math/big".i·3 % 0x40) & @"math/big".Word(0x1)) } + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".bitLen () (? 
int) + func (@"math/big".z·2 @"math/big".nat "esc:0x0") @"math/big".bytes (@"math/big".buf·3 []byte "esc:0x0") (@"math/big".i·1 int) + func (@"math/big".z·1 @"math/big".nat "esc:0x0") @"math/big".clear () + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".cmp (@"math/big".y·3 @"math/big".nat "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".q·1 @"math/big".nat) @"math/big".convertWords (@"math/big".s·2 []byte "esc:0x0", @"math/big".charset·3 string "esc:0x0", @"math/big".b·4 @"math/big".Word, @"math/big".ndigits·5 int, @"math/big".bb·6 @"math/big".Word, @"math/big".table·7 []@"math/big".divisor "esc:0x0") + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".decimalString () (? string) + func (@"math/big".z·3 @"math/big".nat) @"math/big".div (@"math/big".z2·4 @"math/big".nat, @"math/big".u·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat "esc:0x2") @"math/big".divLarge (@"math/big".u·4 @"math/big".nat, @"math/big".uIn·5 @"math/big".nat, @"math/big".v·6 @"math/big".nat) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".nat) + func (@"math/big".z·3 @"math/big".nat) @"math/big".divW (@"math/big".x·4 @"math/big".nat, @"math/big".y·5 @"math/big".Word) (@"math/big".q·1 @"math/big".nat, @"math/big".r·2 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNN (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expNNWindowed (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat "esc:0x0", @"math/big".m·5 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".expWW (@"math/big".x·3 @"math/big".Word, @"math/big".y·4 @"math/big".Word) (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".make (@"math/big".n·3 int) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat) @"math/big".modW (@"math/big".d·3 @"math/big".Word) (@"math/big".r·1 @"math/big".Word) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mul (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulAddWW (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".Word, @"math/big".r·5 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".mulRange (@"math/big".a·3 uint64, @"math/big".b·4 uint64) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".norm () (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".or (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".n·2 @"math/big".nat) @"math/big".probablyPrime (@"math/big".reps·3 int) (? bool) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".random (@"math/big".rand·3 *@"math/rand".Rand, @"math/big".limit·4 @"math/big".nat "esc:0x0", @"math/big".n·5 int) (? @"math/big".nat) + func (@"math/big".z·4 @"math/big".nat) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? @"math/big".nat, ? int, ? error) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".set (@"math/big".x·3 @"math/big".nat "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBit (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".i·4 uint, @"math/big".b·5 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setBytes (@"math/big".buf·3 []byte "esc:0x0") (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setUint64 (@"math/big".x·3 uint64) (? 
@"math/big".nat) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".setWord (@"math/big".x·3 @"math/big".Word) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shl (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".z·2 @"math/big".nat) @"math/big".shr (@"math/big".x·3 @"math/big".nat, @"math/big".s·4 uint) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".string (@"math/big".charset·3 string "esc:0x0") (? string) + func (@"math/big".z·2 @"math/big".nat) @"math/big".sub (@"math/big".x·3 @"math/big".nat, @"math/big".y·4 @"math/big".nat) (? @"math/big".nat) + func (@"math/big".x·2 @"math/big".nat "esc:0x0") @"math/big".trailingZeroBits () (? uint) + func (@"math/big".z·2 @"math/big".nat "esc:0x2") @"math/big".xor (@"math/big".x·3 @"math/big".nat "esc:0x0", @"math/big".y·4 @"math/big".nat "esc:0x0") (? @"math/big".nat) + type @"fmt".State interface { Flag(@"fmt".c int) (? bool); Precision() (@"fmt".prec int, @"fmt".ok bool); Width() (@"fmt".wid int, @"fmt".ok bool); Write(@"fmt".b []byte) (@"fmt".ret int, @"fmt".err error) } + type @"fmt".ScanState interface { Read(@"fmt".buf []byte) (@"fmt".n int, @"fmt".err error); ReadRune() (@"fmt".r rune, @"fmt".size int, @"fmt".err error); SkipSpace(); Token(@"fmt".skipSpace bool, @"fmt".f func(? rune) (? bool)) (@"fmt".token []byte, @"fmt".err error); UnreadRune() (? error); Width() (@"fmt".wid int, @"fmt".ok bool) } + type @"math/big".Int struct { @"math/big".neg bool; @"math/big".abs @"math/big".nat } + func (@"math/big".z·2 *@"math/big".Int) Abs (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Add (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) And (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? 
*@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) AndNot (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Binomial (@"math/big".n·3 int64, @"math/big".k·4 int64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) Bit (@"math/big".i·3 int) (? uint) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") BitLen () (? int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x1") Bits () (? []@"math/big".Word) { return @"math/big".x·2.@"math/big".abs } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Bytes () (? []byte) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Cmp (@"math/big".y·3 *@"math/big".Int "esc:0x0") (@"math/big".r·1 int) + func (@"math/big".z·2 *@"math/big".Int) Div (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) DivMod (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".m·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Exp (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int "esc:0x0", @"math/big".m·5 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·1 *@"math/big".Int "esc:0x0") Format (@"math/big".s·2 @"fmt".State, @"math/big".ch·3 rune) + func (@"math/big".z·2 *@"math/big".Int) GCD (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int, @"math/big".a·5 *@"math/big".Int, @"math/big".b·6 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) GobDecode (@"math/big".buf·3 []byte "esc:0x0") (? error) + func (@"math/big".x·3 *@"math/big".Int "esc:0x0") GobEncode () (? []byte, ? error) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Int64 () (? int64) + func (@"math/big".z·2 *@"math/big".Int) Lsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? 
*@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalJSON () (? []byte, ? error) + func (@"math/big".z·3 *@"math/big".Int "esc:0x0") MarshalText () (@"math/big".text·1 []byte, @"math/big".err·2 error) + func (@"math/big".z·2 *@"math/big".Int) Mod (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) ModInverse (@"math/big".g·3 *@"math/big".Int, @"math/big".n·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Mul (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) MulRange (@"math/big".a·3 int64, @"math/big".b·4 int64) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Neg (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Not (@"math/big".x·3 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Or (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int) ProbablyPrime (@"math/big".n·3 int) (? bool) + func (@"math/big".z·2 *@"math/big".Int) Quo (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) QuoRem (@"math/big".x·4 *@"math/big".Int, @"math/big".y·5 *@"math/big".Int, @"math/big".r·6 *@"math/big".Int) (? *@"math/big".Int, ? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rand (@"math/big".rnd·3 *@"math/rand".Rand, @"math/big".n·4 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rem (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Rsh (@"math/big".x·3 *@"math/big".Int, @"math/big".n·4 uint) (? 
*@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) Scan (@"math/big".s·3 @"fmt".ScanState, @"math/big".ch·4 rune) (? error) + func (@"math/big".z·2 *@"math/big".Int) Set (@"math/big".x·3 *@"math/big".Int "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBit (@"math/big".x·3 *@"math/big".Int, @"math/big".i·4 int, @"math/big".b·5 uint) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int "esc:0x2") SetBits (@"math/big".abs·3 []@"math/big".Word) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetBytes (@"math/big".buf·3 []byte "esc:0x0") (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) SetInt64 (@"math/big".x·3 int64) (? *@"math/big".Int) + func (@"math/big".z·3 *@"math/big".Int) SetString (@"math/big".s·4 string, @"math/big".base·5 int) (? *@"math/big".Int, ? bool) + func (@"math/big".z·2 *@"math/big".Int) SetUint64 (@"math/big".x·3 uint64) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Sign () (? int) { if len(@"math/big".x·2.@"math/big".abs) == 0x0 { return 0x0 }; if @"math/big".x·2.@"math/big".neg { return -0x1 }; return 0x1 } + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") String () (? string) + func (@"math/big".z·2 *@"math/big".Int) Sub (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".x·2 *@"math/big".Int "esc:0x0") Uint64 () (? uint64) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalJSON (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) UnmarshalText (@"math/big".text·3 []byte) (? error) + func (@"math/big".z·2 *@"math/big".Int) Xor (@"math/big".x·3 *@"math/big".Int, @"math/big".y·4 *@"math/big".Int) (? *@"math/big".Int) + func (@"math/big".z·2 *@"math/big".Int) @"math/big".binaryGCD (@"math/big".a·3 *@"math/big".Int, @"math/big".b·4 *@"math/big".Int) (? 
*@"math/big".Int) + func (@"math/big".z·4 *@"math/big".Int) @"math/big".scan (@"math/big".r·5 @"io".RuneScanner, @"math/big".base·6 int) (? *@"math/big".Int, ? int, ? error) + import pkix "crypto/x509/pkix" // indirect + import asn1 "encoding/asn1" // indirect + type @"encoding/asn1".ObjectIdentifier []int + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") Equal (@"encoding/asn1".other·3 @"encoding/asn1".ObjectIdentifier "esc:0x0") (? bool) + func (@"encoding/asn1".oi·2 @"encoding/asn1".ObjectIdentifier "esc:0x0") String () (? string) + type @"crypto/x509/pkix".AttributeTypeAndValue struct { Type @"encoding/asn1".ObjectIdentifier; Value interface {} } + type @"crypto/x509/pkix".RelativeDistinguishedNameSET []@"crypto/x509/pkix".AttributeTypeAndValue + type @"crypto/x509/pkix".RDNSequence []@"crypto/x509/pkix".RelativeDistinguishedNameSET + type @"crypto/x509/pkix".Name struct { Country []string; Organization []string; OrganizationalUnit []string; Locality []string; Province []string; StreetAddress []string; PostalCode []string; SerialNumber string; CommonName string; Names []@"crypto/x509/pkix".AttributeTypeAndValue } + func (@"crypto/x509/pkix".n·1 *@"crypto/x509/pkix".Name) FillFromRDNSequence (@"crypto/x509/pkix".rdns·2 *@"crypto/x509/pkix".RDNSequence "esc:0x0") + func (@"crypto/x509/pkix".n·2 @"crypto/x509/pkix".Name) ToRDNSequence () (@"crypto/x509/pkix".ret·1 @"crypto/x509/pkix".RDNSequence) + type @"crypto/x509".KeyUsage int + type @"crypto/x509/pkix".Extension struct { Id @"encoding/asn1".ObjectIdentifier; Critical bool "asn1:\"optional\""; Value []byte } + type @"crypto/x509".ExtKeyUsage int + type @"net".IPMask []byte + func (@"net".m·3 @"net".IPMask "esc:0x0") Size () (@"net".ones·1 int, @"net".bits·2 int) + func (@"net".m·2 @"net".IPMask "esc:0x0") String () (? string) + type @"net".IP []byte + func (@"net".ip·2 @"net".IP "esc:0x0") DefaultMask () (? 
@"net".IPMask) + func (@"net".ip·2 @"net".IP "esc:0x0") Equal (@"net".x·3 @"net".IP "esc:0x0") (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsGlobalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsInterfaceLocalMulticast () (? bool) { return len(@"net".ip·2) == 0x10 && @"net".ip·2[0x0] == byte(0xFF) && @"net".ip·2[0x1] & byte(0xF) == byte(0x1) } + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLinkLocalUnicast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsLoopback () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsMulticast () (? bool) + func (@"net".ip·2 @"net".IP "esc:0x0") IsUnspecified () (? bool) + func (@"net".ip·3 @"net".IP "esc:0x0") MarshalText () (? []byte, ? error) + func (@"net".ip·2 @"net".IP "esc:0x0") Mask (@"net".mask·3 @"net".IPMask "esc:0x0") (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x0") String () (? string) + func (@"net".ip·2 @"net".IP "esc:0x2") To16 () (? @"net".IP) + func (@"net".ip·2 @"net".IP "esc:0x2") To4 () (? @"net".IP) + func (@"net".ip·2 *@"net".IP "esc:0x0") UnmarshalText (@"net".text·3 []byte "esc:0x0") (? 
error) + type @"encoding/asn1".RawContent []byte + type @"encoding/asn1".RawValue struct { Class int; Tag int; IsCompound bool; Bytes []byte; FullBytes []byte } + type @"crypto/x509/pkix".AlgorithmIdentifier struct { Algorithm @"encoding/asn1".ObjectIdentifier; Parameters @"encoding/asn1".RawValue "asn1:\"optional\"" } + type @"crypto/x509/pkix".RevokedCertificate struct { SerialNumber *@"math/big".Int; RevocationTime @"time".Time; Extensions []@"crypto/x509/pkix".Extension "asn1:\"optional\"" } + type @"crypto/x509/pkix".TBSCertificateList struct { Raw @"encoding/asn1".RawContent; Version int "asn1:\"optional,default:2\""; Signature @"crypto/x509/pkix".AlgorithmIdentifier; Issuer @"crypto/x509/pkix".RDNSequence; ThisUpdate @"time".Time; NextUpdate @"time".Time "asn1:\"optional\""; RevokedCertificates []@"crypto/x509/pkix".RevokedCertificate "asn1:\"optional\""; Extensions []@"crypto/x509/pkix".Extension "asn1:\"tag:0,optional,explicit\"" } + type @"encoding/asn1".BitString struct { Bytes []byte; BitLength int } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x0") At (@"encoding/asn1".i·3 int) (? int) { if @"encoding/asn1".i·3 < 0x0 || @"encoding/asn1".i·3 >= @"encoding/asn1".b·2.BitLength { return 0x0 }; var @"encoding/asn1".x·4 int; ; @"encoding/asn1".x·4 = @"encoding/asn1".i·3 / 0x8; var @"encoding/asn1".y·5 uint; ; @"encoding/asn1".y·5 = 0x7 - uint(@"encoding/asn1".i·3 % 0x8); return int(@"encoding/asn1".b·2.Bytes[@"encoding/asn1".x·4] >> @"encoding/asn1".y·5) & 0x1 } + func (@"encoding/asn1".b·2 @"encoding/asn1".BitString "esc:0x2") RightAlign () (? []byte) + type @"crypto/x509/pkix".CertificateList struct { TBSCertList @"crypto/x509/pkix".TBSCertificateList; SignatureAlgorithm @"crypto/x509/pkix".AlgorithmIdentifier; SignatureValue @"encoding/asn1".BitString } + func (@"crypto/x509/pkix".certList·2 *@"crypto/x509/pkix".CertificateList "esc:0x0") HasExpired (@"crypto/x509/pkix".now·3 @"time".Time "esc:0x0") (? 
bool) + type @"io".Reader interface { Read(@"io".p []byte) (@"io".n int, @"io".err error) } + type @"crypto/x509".CertPool struct { @"crypto/x509".bySubjectKeyId map[string][]int; @"crypto/x509".byName map[string][]int; @"crypto/x509".certs []*@"crypto/x509".Certificate } + func (@"crypto/x509".s·1 *@"crypto/x509".CertPool) AddCert (@"crypto/x509".cert·2 *@"crypto/x509".Certificate) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool) AppendCertsFromPEM (@"crypto/x509".pemCerts·3 []byte) (@"crypto/x509".ok·1 bool) + func (@"crypto/x509".s·2 *@"crypto/x509".CertPool "esc:0x0") Subjects () (@"crypto/x509".res·1 [][]byte) + func (@"crypto/x509".s·4 *@"crypto/x509".CertPool "esc:0x0") @"crypto/x509".findVerifiedParents (@"crypto/x509".cert·5 *@"crypto/x509".Certificate) (@"crypto/x509".parents·1 []int, @"crypto/x509".errCert·2 *@"crypto/x509".Certificate, @"crypto/x509".err·3 error) + type @"crypto/x509".VerifyOptions struct { DNSName string; Intermediates *@"crypto/x509".CertPool; Roots *@"crypto/x509".CertPool; CurrentTime @"time".Time; KeyUsages []@"crypto/x509".ExtKeyUsage } + type @"crypto/x509".Certificate struct { Raw []byte; RawTBSCertificate []byte; RawSubjectPublicKeyInfo []byte; RawSubject []byte; RawIssuer []byte; Signature []byte; SignatureAlgorithm @"crypto/x509".SignatureAlgorithm; PublicKeyAlgorithm @"crypto/x509".PublicKeyAlgorithm; PublicKey interface {}; Version int; SerialNumber *@"math/big".Int; Issuer @"crypto/x509/pkix".Name; Subject @"crypto/x509/pkix".Name; NotBefore @"time".Time; NotAfter @"time".Time; KeyUsage @"crypto/x509".KeyUsage; Extensions []@"crypto/x509/pkix".Extension; ExtraExtensions []@"crypto/x509/pkix".Extension; ExtKeyUsage []@"crypto/x509".ExtKeyUsage; UnknownExtKeyUsage []@"encoding/asn1".ObjectIdentifier; BasicConstraintsValid bool; IsCA bool; MaxPathLen int; MaxPathLenZero bool; SubjectKeyId []byte; AuthorityKeyId []byte; OCSPServer []string; IssuingCertificateURL []string; DNSNames []string; EmailAddresses []string; 
IPAddresses []@"net".IP; PermittedDNSDomainsCritical bool; PermittedDNSDomains []string; CRLDistributionPoints []string; PolicyIdentifiers []@"encoding/asn1".ObjectIdentifier } + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckCRLSignature (@"crypto/x509".crl·3 *@"crypto/x509/pkix".CertificateList) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignature (@"crypto/x509".algo·3 @"crypto/x509".SignatureAlgorithm, @"crypto/x509".signed·4 []byte, @"crypto/x509".signature·5 []byte) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate) CheckSignatureFrom (@"crypto/x509".parent·3 *@"crypto/x509".Certificate) (@"crypto/x509".err·1 error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) CreateCRL (@"crypto/x509".rand·4 @"io".Reader, @"crypto/x509".priv·5 interface {}, @"crypto/x509".revokedCerts·6 []@"crypto/x509/pkix".RevokedCertificate, @"crypto/x509".now·7 @"time".Time, @"crypto/x509".expiry·8 @"time".Time) (@"crypto/x509".crlBytes·1 []byte, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x0") Equal (@"crypto/x509".other·3 *@"crypto/x509".Certificate "esc:0x0") (? bool) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) Verify (@"crypto/x509".opts·4 @"crypto/x509".VerifyOptions "esc:0x4") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") VerifyHostname (@"crypto/x509".h·3 string "esc:0x2") (? 
error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate) @"crypto/x509".buildChains (@"crypto/x509".cache·4 map[int][][]*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".currentChain·5 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·6 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) + func (@"crypto/x509".c·2 *@"crypto/x509".Certificate "esc:0x2") @"crypto/x509".isValid (@"crypto/x509".certType·3 int, @"crypto/x509".currentChain·4 []*@"crypto/x509".Certificate "esc:0x0", @"crypto/x509".opts·5 *@"crypto/x509".VerifyOptions "esc:0x0") (? error) + func (@"crypto/x509".c·3 *@"crypto/x509".Certificate "esc:0x0") @"crypto/x509".systemVerify (@"crypto/x509".opts·4 *@"crypto/x509".VerifyOptions "esc:0x0") (@"crypto/x509".chains·1 [][]*@"crypto/x509".Certificate, @"crypto/x509".err·2 error) { return nil, nil } + type @"crypto/tls".ConnectionState struct { Version uint16; HandshakeComplete bool; DidResume bool; CipherSuite uint16; NegotiatedProtocol string; NegotiatedProtocolIsMutual bool; ServerName string; PeerCertificates []*@"crypto/x509".Certificate; VerifiedChains [][]*@"crypto/x509".Certificate; TLSUnique []byte } + type @"net/http".Cookie struct { Name string; Value string; Path string; Domain string; Expires @"time".Time; RawExpires string; MaxAge int; Secure bool; HttpOnly bool; Raw string; Unparsed []string } + func (@"net/http".c·2 *@"net/http".Cookie) String () (? string) + import bufio "bufio" // indirect + type @"bufio".Reader struct { @"bufio".buf []byte; @"bufio".rd @"io".Reader; @"bufio".r int; @"bufio".w int; @"bufio".err error; @"bufio".lastByte int; @"bufio".lastRuneSize int } + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") Buffered () (? int) { return @"bufio".b·2.@"bufio".w - @"bufio".b·2.@"bufio".r } + func (@"bufio".b·3 *@"bufio".Reader) Peek (@"bufio".n·4 int) (? []byte, ? 
error) + func (@"bufio".b·3 *@"bufio".Reader) Read (@"bufio".p·4 []byte) (@"bufio".n·1 int, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadByte () (@"bufio".c·1 byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadBytes (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadLine () (@"bufio".line·1 []byte, @"bufio".isPrefix·2 bool, @"bufio".err·3 error) + func (@"bufio".b·4 *@"bufio".Reader) ReadRune () (@"bufio".r·1 rune, @"bufio".size·2 int, @"bufio".err·3 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadSlice (@"bufio".delim·4 byte) (@"bufio".line·1 []byte, @"bufio".err·2 error) + func (@"bufio".b·3 *@"bufio".Reader) ReadString (@"bufio".delim·4 byte) (@"bufio".line·1 string, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) Reset (@"bufio".r·2 @"io".Reader) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadByte () (? error) + func (@"bufio".b·2 *@"bufio".Reader "esc:0x0") UnreadRune () (? error) { if @"bufio".b·2.@"bufio".lastRuneSize < 0x0 || @"bufio".b·2.@"bufio".r < @"bufio".b·2.@"bufio".lastRuneSize { return @"bufio".ErrInvalidUnreadRune }; @"bufio".b·2.@"bufio".r -= @"bufio".b·2.@"bufio".lastRuneSize; @"bufio".b·2.@"bufio".lastByte = -0x1; @"bufio".b·2.@"bufio".lastRuneSize = -0x1; return nil } + func (@"bufio".b·3 *@"bufio".Reader) WriteTo (@"bufio".w·4 @"io".Writer) (@"bufio".n·1 int64, @"bufio".err·2 error) + func (@"bufio".b·1 *@"bufio".Reader) @"bufio".fill () + func (@"bufio".b·2 *@"bufio".Reader "esc:0x1") @"bufio".readErr () (? 
error) { var @"bufio".err·3 error; ; @"bufio".err·3 = @"bufio".b·2.@"bufio".err; @"bufio".b·2.@"bufio".err = nil; return @"bufio".err·3 } + func (@"bufio".b·1 *@"bufio".Reader "esc:0x0") @"bufio".reset (@"bufio".buf·2 []byte, @"bufio".r·3 @"io".Reader) { *@"bufio".b·1 = (@"bufio".Reader{ @"bufio".buf:@"bufio".buf·2, @"bufio".rd:@"bufio".r·3, @"bufio".lastByte:-0x1, @"bufio".lastRuneSize:-0x1 }) } + func (@"bufio".b·3 *@"bufio".Reader) @"bufio".writeBuf (@"bufio".w·4 @"io".Writer) (? int64, ? error) + import bytes "bytes" // indirect + type @"bytes".readOp int + type @"bytes".Buffer struct { @"bytes".buf []byte; @"bytes".off int; @"bytes".runeBytes [4]byte; @"bytes".bootstrap [64]byte; @"bytes".lastRead @"bytes".readOp } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Bytes () (? []byte) { return @"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:] } + func (@"bytes".b·1 *@"bytes".Buffer) Grow (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") Len () (? int) { return len(@"bytes".b·2.@"bytes".buf) - @"bytes".b·2.@"bytes".off } + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x1") Next (@"bytes".n·3 int) (? []byte) + func (@"bytes".b·3 *@"bytes".Buffer) Read (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadByte () (@"bytes".c·1 byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadBytes (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) ReadFrom (@"bytes".r·4 @"io".Reader) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·4 *@"bytes".Buffer) ReadRune () (@"bytes".r·1 rune, @"bytes".size·2 int, @"bytes".err·3 error) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x0") ReadString (@"bytes".delim·4 byte) (@"bytes".line·1 string, @"bytes".err·2 error) + func (@"bytes".b·1 *@"bytes".Buffer) Reset () + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") String () (? 
string) { if @"bytes".b·2 == nil { return "" }; return string(@"bytes".b·2.@"bytes".buf[@"bytes".b·2.@"bytes".off:]) } + func (@"bytes".b·1 *@"bytes".Buffer) Truncate (@"bytes".n·2 int) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadByte () (? error) + func (@"bytes".b·2 *@"bytes".Buffer "esc:0x0") UnreadRune () (? error) + func (@"bytes".b·3 *@"bytes".Buffer) Write (@"bytes".p·4 []byte "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) WriteByte (@"bytes".c·3 byte) (? error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteRune (@"bytes".r·4 rune) (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteString (@"bytes".s·4 string "esc:0x0") (@"bytes".n·1 int, @"bytes".err·2 error) + func (@"bytes".b·3 *@"bytes".Buffer) WriteTo (@"bytes".w·4 @"io".Writer) (@"bytes".n·1 int64, @"bytes".err·2 error) + func (@"bytes".b·2 *@"bytes".Buffer) @"bytes".grow (@"bytes".n·3 int) (? int) + func (@"bytes".b·3 *@"bytes".Buffer "esc:0x1") @"bytes".readSlice (@"bytes".delim·4 byte) (@"bytes".line·1 []byte, @"bytes".err·2 error) + type @"mime/multipart".Part struct { Header @"net/textproto".MIMEHeader; @"mime/multipart".buffer *@"bytes".Buffer; @"mime/multipart".mr *@"mime/multipart".Reader; @"mime/multipart".bytesRead int; @"mime/multipart".disposition string; @"mime/multipart".dispositionParams map[string]string; @"mime/multipart".r @"io".Reader } + func (@"mime/multipart".p·2 *@"mime/multipart".Part) Close () (? error) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FileName () (? string) + func (@"mime/multipart".p·2 *@"mime/multipart".Part "esc:0x0") FormName () (? 
string) + func (@"mime/multipart".p·3 *@"mime/multipart".Part) Read (@"mime/multipart".d·4 []byte) (@"mime/multipart".n·1 int, @"mime/multipart".err·2 error) + func (@"mime/multipart".p·1 *@"mime/multipart".Part "esc:0x0") @"mime/multipart".parseContentDisposition () + func (@"mime/multipart".bp·2 *@"mime/multipart".Part) @"mime/multipart".populateHeaders () (? error) + type @"mime/multipart".Reader struct { @"mime/multipart".bufReader *@"bufio".Reader; @"mime/multipart".currentPart *@"mime/multipart".Part; @"mime/multipart".partsRead int; @"mime/multipart".nl []byte; @"mime/multipart".nlDashBoundary []byte; @"mime/multipart".dashBoundaryDash []byte; @"mime/multipart".dashBoundary []byte } + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) NextPart () (? *@"mime/multipart".Part, ? error) + func (@"mime/multipart".r·3 *@"mime/multipart".Reader) ReadForm (@"mime/multipart".maxMemory·4 int64) (@"mime/multipart".f·1 *@"mime/multipart".Form, @"mime/multipart".err·2 error) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader) @"mime/multipart".isBoundaryDelimiterLine (@"mime/multipart".line·3 []byte "esc:0x0") (@"mime/multipart".ret·1 bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".isFinalBoundary (@"mime/multipart".line·3 []byte "esc:0x0") (? bool) + func (@"mime/multipart".mr·2 *@"mime/multipart".Reader "esc:0x0") @"mime/multipart".peekBufferIsEmptyPart (@"mime/multipart".peek·3 []byte "esc:0x0") (? 
bool) + type @"net/http".Request struct { Method string; URL *@"net/url".URL; Proto string; ProtoMajor int; ProtoMinor int; Header @"net/http".Header; Body @"io".ReadCloser; ContentLength int64; TransferEncoding []string; Close bool; Host string; Form @"net/url".Values; PostForm @"net/url".Values; MultipartForm *@"mime/multipart".Form; Trailer @"net/http".Header; RemoteAddr string; RequestURI string; TLS *@"crypto/tls".ConnectionState } + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") AddCookie (@"net/http".c·2 *@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request "esc:0x0") BasicAuth () (@"net/http".username·1 string, @"net/http".password·2 string, @"net/http".ok·3 bool) + func (@"net/http".r·3 *@"net/http".Request "esc:0x0") Cookie (@"net/http".name·4 string "esc:0x0") (? *@"net/http".Cookie, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Cookies () (? []*@"net/http".Cookie) + func (@"net/http".r·4 *@"net/http".Request) FormFile (@"net/http".key·5 string "esc:0x0") (? @"mime/multipart".File, ? *@"mime/multipart".FileHeader, ? error) + func (@"net/http".r·2 *@"net/http".Request) FormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·3 *@"net/http".Request) MultipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request) ParseForm () (? error) + func (@"net/http".r·2 *@"net/http".Request) ParseMultipartForm (@"net/http".maxMemory·3 int64) (? error) + func (@"net/http".r·2 *@"net/http".Request) PostFormValue (@"net/http".key·3 string "esc:0x0") (? string) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") ProtoAtLeast (@"net/http".major·3 int, @"net/http".minor·4 int) (? bool) { return @"net/http".r·2.ProtoMajor > @"net/http".major·3 || @"net/http".r·2.ProtoMajor == @"net/http".major·3 && @"net/http".r·2.ProtoMinor >= @"net/http".minor·4 } + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") Referer () (? 
string) + func (@"net/http".r·1 *@"net/http".Request "esc:0x0") SetBasicAuth (@"net/http".username·2 string "esc:0x0", @"net/http".password·3 string "esc:0x0") + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") UserAgent () (? string) + func (@"net/http".r·2 *@"net/http".Request) Write (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·2 *@"net/http".Request) WriteProxy (@"net/http".w·3 @"io".Writer) (? error) + func (@"net/http".r·1 *@"net/http".Request) @"net/http".closeBody () + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".expectsContinue () (? bool) + func (@"net/http".r·3 *@"net/http".Request) @"net/http".multipartReader () (? *@"mime/multipart".Reader, ? error) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsClose () (? bool) + func (@"net/http".r·2 *@"net/http".Request "esc:0x0") @"net/http".wantsHttp10KeepAlive () (? bool) + func (@"net/http".req·2 *@"net/http".Request) @"net/http".write (@"net/http".w·3 @"io".Writer, @"net/http".usingProxy·4 bool, @"net/http".extraHeaders·5 @"net/http".Header "esc:0x0") (? error) + type @"net/http".Handler interface { ServeHTTP(? @"net/http".ResponseWriter, ? *@"net/http".Request) } + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch struct { Route *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route; Handler @"net/http".Handler; Vars map[string]string } + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matcher interface { Match(? *@"net/http".Request, ? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch) (? bool) } + import syntax "regexp/syntax" // indirect + type @"regexp/syntax".InstOp uint8 + func (@"regexp/syntax".i·2 @"regexp/syntax".InstOp) String () (? 
string) { if uint(@"regexp/syntax".i·2) >= uint(len(@"regexp/syntax".instOpNames)) { return "" }; return @"regexp/syntax".instOpNames[@"regexp/syntax".i·2] } + type @"regexp/syntax".Inst struct { Op @"regexp/syntax".InstOp; Out uint32; Arg uint32; Rune []rune } + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchEmptyWidth (@"regexp/syntax".before·3 rune, @"regexp/syntax".after·4 rune) (? bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRune (@"regexp/syntax".r·3 rune) (? bool) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") MatchRunePos (@"regexp/syntax".r·3 rune) (? int) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") String () (? string) + func (@"regexp/syntax".i·2 *@"regexp/syntax".Inst "esc:0x0") @"regexp/syntax".op () (? @"regexp/syntax".InstOp) + type @"regexp/syntax".EmptyOp uint8 + type @"regexp/syntax".Prog struct { Inst []@"regexp/syntax".Inst; Start int; NumCap int } + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x0") Prefix () (@"regexp/syntax".prefix·1 string, @"regexp/syntax".complete·2 bool) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") StartCond () (? @"regexp/syntax".EmptyOp) + func (@"regexp/syntax".p·2 *@"regexp/syntax".Prog "esc:0x0") String () (? string) + func (@"regexp/syntax".p·3 *@"regexp/syntax".Prog "esc:0x1") @"regexp/syntax".skipNop (@"regexp/syntax".pc·4 uint32) (? *@"regexp/syntax".Inst, ? uint32) + type @"regexp".onePassInst struct { ? 
@"regexp/syntax".Inst; Next []uint32 } + type @"regexp".onePassProg struct { Inst []@"regexp".onePassInst; Start int; NumCap int } + type @"regexp".thread struct { @"regexp".inst *@"regexp/syntax".Inst; @"regexp".cap []int } + type @"regexp".entry struct { @"regexp".pc uint32; @"regexp".t *@"regexp".thread } + type @"regexp".queue struct { @"regexp".sparse []uint32; @"regexp".dense []@"regexp".entry } + type @"regexp".inputBytes struct { @"regexp".str []byte } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputBytes "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputBytes "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + type @"regexp".inputString struct { @"regexp".str string } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return true } + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) + func (@"regexp".i·2 *@"regexp".inputString "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) + func (@"regexp".i·3 *@"regexp".inputString "esc:0x0") @"regexp".step (@"regexp".pos·4 int) (? rune, ? 
int) + type @"io".RuneReader interface { ReadRune() (@"io".r rune, @"io".size int, @"io".err error) } + type @"regexp".inputReader struct { @"regexp".r @"io".RuneReader; @"regexp".atEOT bool; @"regexp".pos int } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".canCheckPrefix () (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".context (@"regexp".pos·3 int) (? @"regexp/syntax".EmptyOp) { return @"regexp/syntax".EmptyOp(0x0) } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".hasPrefix (@"regexp".re·3 *@"regexp".Regexp "esc:0x0") (? bool) { return false } + func (@"regexp".i·2 *@"regexp".inputReader "esc:0x0") @"regexp".index (@"regexp".re·3 *@"regexp".Regexp "esc:0x0", @"regexp".pos·4 int) (? int) { return -0x1 } + func (@"regexp".i·3 *@"regexp".inputReader) @"regexp".step (@"regexp".pos·4 int) (? rune, ? int) + type @"regexp".input interface { @"regexp".canCheckPrefix() (? bool); @"regexp".context(@"regexp".pos int) (? @"regexp/syntax".EmptyOp); @"regexp".hasPrefix(@"regexp".re *@"regexp".Regexp) (? bool); @"regexp".index(@"regexp".re *@"regexp".Regexp, @"regexp".pos int) (? int); @"regexp".step(@"regexp".pos int) (@"regexp".r rune, @"regexp".width int) } + type @"regexp".machine struct { @"regexp".re *@"regexp".Regexp; @"regexp".p *@"regexp/syntax".Prog; @"regexp".op *@"regexp".onePassProg; @"regexp".q0 @"regexp".queue; @"regexp".q1 @"regexp".queue; @"regexp".pool []*@"regexp".thread; @"regexp".matched bool; @"regexp".matchcap []int; @"regexp".inputBytes @"regexp".inputBytes; @"regexp".inputString @"regexp".inputString; @"regexp".inputReader @"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".add (@"regexp".q·3 *@"regexp".queue, @"regexp".pc·4 uint32, @"regexp".pos·5 int, @"regexp".cap·6 []int "esc:0x0", @"regexp".cond·7 @"regexp/syntax".EmptyOp, @"regexp".t·8 *@"regexp".thread) (? 
*@"regexp".thread) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".alloc (@"regexp".i·3 *@"regexp/syntax".Inst) (? *@"regexp".thread) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".clear (@"regexp".q·2 *@"regexp".queue) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".free (@"regexp".t·2 *@"regexp".thread) { @"regexp".m·1.@"regexp".inputBytes.@"regexp".str = nil; @"regexp".m·1.@"regexp".inputString.@"regexp".str = ""; @"regexp".m·1.@"regexp".inputReader.@"regexp".r = nil; @"regexp".m·1.@"regexp".pool = append(@"regexp".m·1.@"regexp".pool, @"regexp".t·2) } + func (@"regexp".m·1 *@"regexp".machine) @"regexp".init (@"regexp".ncap·2 int) + func (@"regexp".m·2 *@"regexp".machine) @"regexp".match (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? bool) + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputBytes (@"regexp".b·3 []byte) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputBytes.@"regexp".str = @"regexp".b·3; return &@"regexp".m·2.@"regexp".inputBytes } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputReader (@"regexp".r·3 @"io".RuneReader) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputReader.@"regexp".r = @"regexp".r·3; @"regexp".m·2.@"regexp".inputReader.@"regexp".atEOT = false; @"regexp".m·2.@"regexp".inputReader.@"regexp".pos = 0x0; return &@"regexp".m·2.@"regexp".inputReader } + func (@"regexp".m·2 *@"regexp".machine "esc:0x2") @"regexp".newInputString (@"regexp".s·3 string) (? @"regexp".input) { @"regexp".m·2.@"regexp".inputString.@"regexp".str = @"regexp".s·3; return &@"regexp".m·2.@"regexp".inputString } + func (@"regexp".m·2 *@"regexp".machine) @"regexp".onepass (@"regexp".i·3 @"regexp".input, @"regexp".pos·4 int) (? 
bool) + func (@"regexp".m·1 *@"regexp".machine) @"regexp".step (@"regexp".runq·2 *@"regexp".queue, @"regexp".nextq·3 *@"regexp".queue, @"regexp".pos·4 int, @"regexp".nextPos·5 int, @"regexp".c·6 rune, @"regexp".nextCond·7 @"regexp/syntax".EmptyOp) + type @"regexp".Regexp struct { @"regexp".expr string; @"regexp".prog *@"regexp/syntax".Prog; @"regexp".onepass *@"regexp".onePassProg; @"regexp".prefix string; @"regexp".prefixBytes []byte; @"regexp".prefixComplete bool; @"regexp".prefixRune rune; @"regexp".prefixEnd uint32; @"regexp".cond @"regexp/syntax".EmptyOp; @"regexp".numSubexp int; @"regexp".subexpNames []string; @"regexp".longest bool; @"regexp".mu @"sync".Mutex; @"regexp".machine []*@"regexp".machine } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") Expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 []byte "esc:0x0", @"regexp".src·5 []byte "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") ExpandString (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".src·5 string "esc:0x0", @"regexp".match·6 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) Find (@"regexp".b·3 []byte) (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAll (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllString (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatch (@"regexp".s·3 string, @"regexp".n·4 int) (? [][]string) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllStringSubmatchIndex (@"regexp".s·3 string, @"regexp".n·4 int) (? 
[][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatch (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindAllSubmatchIndex (@"regexp".b·3 []byte, @"regexp".n·4 int) (? [][]int) + func (@"regexp".re·2 *@"regexp".Regexp) FindIndex (@"regexp".b·3 []byte) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderIndex (@"regexp".r·3 @"io".RuneReader) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindReaderSubmatchIndex (@"regexp".r·3 @"io".RuneReader) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindString (@"regexp".s·3 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringIndex (@"regexp".s·3 string) (@"regexp".loc·1 []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatch (@"regexp".s·3 string) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp) FindStringSubmatchIndex (@"regexp".s·3 string) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatch (@"regexp".b·3 []byte) (? [][]byte) + func (@"regexp".re·2 *@"regexp".Regexp) FindSubmatchIndex (@"regexp".b·3 []byte) (? []int) + func (@"regexp".re·3 *@"regexp".Regexp "esc:0x1") LiteralPrefix () (@"regexp".prefix·1 string, @"regexp".complete·2 bool) { return @"regexp".re·3.@"regexp".prefix, @"regexp".re·3.@"regexp".prefixComplete } + func (@"regexp".re·1 *@"regexp".Regexp "esc:0x0") Longest () { @"regexp".re·1.@"regexp".longest = true } + func (@"regexp".re·2 *@"regexp".Regexp) Match (@"regexp".b·3 []byte) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchReader (@"regexp".r·3 @"io".RuneReader) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp) MatchString (@"regexp".s·3 string) (? bool) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") NumSubexp () (? int) { return @"regexp".re·2.@"regexp".numSubexp } + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAll (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? 
[]byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllFunc (@"regexp".src·3 []byte, @"regexp".repl·4 func(? []byte) (? []byte) "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteral (@"regexp".src·3 []byte, @"regexp".repl·4 []byte "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllLiteralString (@"regexp".src·3 string, @"regexp".repl·4 string "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllString (@"regexp".src·3 string, @"regexp".repl·4 string) (? string) + func (@"regexp".re·2 *@"regexp".Regexp) ReplaceAllStringFunc (@"regexp".src·3 string, @"regexp".repl·4 func(? string) (? string) "esc:0x0") (? string) + func (@"regexp".re·2 *@"regexp".Regexp) Split (@"regexp".s·3 string, @"regexp".n·4 int) (? []string) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") String () (? string) { return @"regexp".re·2.@"regexp".expr } + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x1") SubexpNames () (? []string) { return @"regexp".re·2.@"regexp".subexpNames } + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".allMatches (@"regexp".s·2 string, @"regexp".b·3 []byte, @"regexp".n·4 int, @"regexp".deliver·5 func(? []int) "esc:0x0") + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".doExecute (@"regexp".r·3 @"io".RuneReader, @"regexp".b·4 []byte, @"regexp".s·5 string, @"regexp".pos·6 int, @"regexp".ncap·7 int) (? []int) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".expand (@"regexp".dst·3 []byte "esc:0x2", @"regexp".template·4 string, @"regexp".bsrc·5 []byte "esc:0x0", @"regexp".src·6 string "esc:0x0", @"regexp".match·7 []int "esc:0x0") (? []byte) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".get () (? *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp "esc:0x0") @"regexp".pad (@"regexp".a·3 []int "esc:0x2") (? 
[]int) + func (@"regexp".re·1 *@"regexp".Regexp) @"regexp".put (@"regexp".z·2 *@"regexp".machine) + func (@"regexp".re·2 *@"regexp".Regexp) @"regexp".replaceAll (@"regexp".bsrc·3 []byte, @"regexp".src·4 string, @"regexp".nmatch·5 int, @"regexp".repl·6 func(@"regexp".dst []byte, @"regexp".m []int) (? []byte) "esc:0x0") (? []byte) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp struct { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".template string; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchHost bool; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchQuery bool; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash bool; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".regexp *@"regexp".Regexp; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".reverse string; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".varsN []string; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".varsR []*@"regexp".Regexp } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp) Match (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".match·4 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch "esc:0x0") (? bool) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp "esc:0x0") @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getUrlQuery (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request) (? 
string) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchQueryString (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request) (? bool) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".url (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".values·4 map[string]string "esc:0x0") (? string, ? error) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup struct { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".host *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".path *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".queries []*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexp } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".v·1 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".setMatch (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·2 *@"net/http".Request, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch "esc:0x0", @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·4 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x0") + type 
@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".BuildVarsFunc func(? map[string]string) (? map[string]string) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".MatcherFunc func(? *@"net/http".Request, ? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch) (? bool) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·2 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".MatcherFunc "esc:0x0") Match (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"net/http".Request, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".match·4 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch) (? bool) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route struct { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parent @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parentRoute; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler @"net/http".Handler; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchers []@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matcher; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".regexp *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash bool; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildOnly bool; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".name string; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".err error; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildVarsFunc @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".BuildVarsFunc } + func 
(@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x2") BuildOnly () (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildOnly = true; return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x2") BuildVarsFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".BuildVarsFunc) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildVarsFunc = @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3; return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x1") GetError () (? error) { return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".err } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x1") GetHandler () (? 
@"net/http".Handler) { return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x1") GetName () (? string) { return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".name } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x2") Handler (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler·3 @"net/http".Handler) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) { if @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".err == nil { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler = @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler·3 }; return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route "esc:0x2") HandlerFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3 func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Headers (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) HeadersRegexp (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Host (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Match (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".match·4 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch) (? bool) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) MatcherFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".MatcherFunc) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Methods (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".methods·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Name (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".name·3 string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Path (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) PathPrefix (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Queries (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Schemes (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".schemes·3 ...string) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) Subrouter () (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) URL (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) URLHost (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) URLPath (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·4 ...string) (? *@"net/url".URL, ? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".addMatcher (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matcher) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) { if @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".err == nil { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchers = append(@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchers, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·3) }; return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".addRegexpMatcher (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchHost·4 bool, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchPrefix·5 bool, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".matchQuery·6 bool) (? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildVars (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·3 map[string]string) (? map[string]string) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getNamedRoutes () (? 
map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getRegexpGroup () (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".prepareVars (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·4 ...string) (? map[string]string, ? error) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parentRoute interface { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildVars(? map[string]string) (? map[string]string); @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getNamedRoutes() (? map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route); @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getRegexpGroup() (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup) } + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".WalkFunc func(@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".route *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".router *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".ancestors []*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) (? 
error) + type @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router struct { NotFoundHandler @"net/http".Handler; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parent @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parentRoute; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routes []*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".namedRoutes map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash bool; KeepContext bool } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) BuildVarsFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".BuildVarsFunc) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Get (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".name·3 string "esc:0x0") (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) GetRoute (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".name·3 string "esc:0x0") (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Handle (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".path·3 string, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".handler·4 @"net/http".Handler) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) HandleFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".path·3 string, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·4 func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Headers (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Host (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router "esc:0x0") Match (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".match·4 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".RouteMatch) (? bool) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) MatcherFunc (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".f·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".MatcherFunc) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Methods (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".methods·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) NewRoute () (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) { var @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".route·3 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route; ; @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".route·3 = (&@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route{ @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".parent:@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash:@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash }); @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routes = append(@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routes, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".route·3); return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".route·3 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Path (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) PathPrefix (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".tpl·3 string) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Queries (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".pairs·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Schemes (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".schemes·3 ...string) (? *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·1 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) ServeHTTP (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".w·2 @"net/http".ResponseWriter, @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".req·3 *@"net/http".Request) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router "esc:0x2") StrictSlash (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".value·3 bool) (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) { @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2.@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".strictSlash = @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".value·3; return @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 } + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) Walk (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".walkFn·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".WalkFunc "esc:0x0") (? error) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".buildVars (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".m·3 map[string]string) (? map[string]string) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getNamedRoutes () (? map[string]*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".getRegexpGroup () (? 
*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".routeRegexpGroup) + func (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".r·2 *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router) @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".walk (@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".walkFn·3 @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".WalkFunc "esc:0x0", @"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".ancestors·4 []*@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Route) (? error) + type @"github.com/fsouza/go-dockerclient".BlkioStatsEntry struct { Major uint64 "json:\"major,omitempty\" yaml:\"major,omitempty\""; Minor uint64 "json:\"minor,omitempty\" yaml:\"minor,omitempty\""; Op string "json:\"op,omitempty\" yaml:\"op,omitempty\""; Value uint64 "json:\"value,omitempty\" yaml:\"value,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".CPUStats struct { CPUUsage struct { PercpuUsage []uint64 "json:\"percpu_usage,omitempty\" yaml:\"percpu_usage,omitempty\""; UsageInUsermode uint64 "json:\"usage_in_usermode,omitempty\" yaml:\"usage_in_usermode,omitempty\""; TotalUsage uint64 "json:\"total_usage,omitempty\" yaml:\"total_usage,omitempty\""; UsageInKernelmode uint64 "json:\"usage_in_kernelmode,omitempty\" yaml:\"usage_in_kernelmode,omitempty\"" } "json:\"cpu_usage,omitempty\" yaml:\"cpu_usage,omitempty\""; SystemCPUUsage uint64 "json:\"system_cpu_usage,omitempty\" yaml:\"system_cpu_usage,omitempty\""; ThrottlingData struct { Periods uint64 "json:\"periods,omitempty\""; ThrottledPeriods uint64 "json:\"throttled_periods,omitempty\""; ThrottledTime uint64 "json:\"throttled_time,omitempty\"" } "json:\"throttling_data,omitempty\" yaml:\"throttling_data,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".Stats struct { Read @"time".Time "json:\"read,omitempty\" yaml:\"read,omitempty\""; 
Network struct { RxDropped uint64 "json:\"rx_dropped,omitempty\" yaml:\"rx_dropped,omitempty\""; RxBytes uint64 "json:\"rx_bytes,omitempty\" yaml:\"rx_bytes,omitempty\""; RxErrors uint64 "json:\"rx_errors,omitempty\" yaml:\"rx_errors,omitempty\""; TxPackets uint64 "json:\"tx_packets,omitempty\" yaml:\"tx_packets,omitempty\""; TxDropped uint64 "json:\"tx_dropped,omitempty\" yaml:\"tx_dropped,omitempty\""; RxPackets uint64 "json:\"rx_packets,omitempty\" yaml:\"rx_packets,omitempty\""; TxErrors uint64 "json:\"tx_errors,omitempty\" yaml:\"tx_errors,omitempty\""; TxBytes uint64 "json:\"tx_bytes,omitempty\" yaml:\"tx_bytes,omitempty\"" } "json:\"network,omitempty\" yaml:\"network,omitempty\""; MemoryStats struct { Stats struct { TotalPgmafault uint64 "json:\"total_pgmafault,omitempty\" yaml:\"total_pgmafault,omitempty\""; Cache uint64 "json:\"cache,omitempty\" yaml:\"cache,omitempty\""; MappedFile uint64 "json:\"mapped_file,omitempty\" yaml:\"mapped_file,omitempty\""; TotalInactiveFile uint64 "json:\"total_inactive_file,omitempty\" yaml:\"total_inactive_file,omitempty\""; Pgpgout uint64 "json:\"pgpgout,omitempty\" yaml:\"pgpgout,omitempty\""; Rss uint64 "json:\"rss,omitempty\" yaml:\"rss,omitempty\""; TotalMappedFile uint64 "json:\"total_mapped_file,omitempty\" yaml:\"total_mapped_file,omitempty\""; Writeback uint64 "json:\"writeback,omitempty\" yaml:\"writeback,omitempty\""; Unevictable uint64 "json:\"unevictable,omitempty\" yaml:\"unevictable,omitempty\""; Pgpgin uint64 "json:\"pgpgin,omitempty\" yaml:\"pgpgin,omitempty\""; TotalUnevictable uint64 "json:\"total_unevictable,omitempty\" yaml:\"total_unevictable,omitempty\""; Pgmajfault uint64 "json:\"pgmajfault,omitempty\" yaml:\"pgmajfault,omitempty\""; TotalRss uint64 "json:\"total_rss,omitempty\" yaml:\"total_rss,omitempty\""; TotalRssHuge uint64 "json:\"total_rss_huge,omitempty\" yaml:\"total_rss_huge,omitempty\""; TotalWriteback uint64 "json:\"total_writeback,omitempty\" yaml:\"total_writeback,omitempty\""; 
TotalInactiveAnon uint64 "json:\"total_inactive_anon,omitempty\" yaml:\"total_inactive_anon,omitempty\""; RssHuge uint64 "json:\"rss_huge,omitempty\" yaml:\"rss_huge,omitempty\""; HierarchicalMemoryLimit uint64 "json:\"hierarchical_memory_limit,omitempty\" yaml:\"hierarchical_memory_limit,omitempty\""; TotalPgfault uint64 "json:\"total_pgfault,omitempty\" yaml:\"total_pgfault,omitempty\""; TotalActiveFile uint64 "json:\"total_active_file,omitempty\" yaml:\"total_active_file,omitempty\""; ActiveAnon uint64 "json:\"active_anon,omitempty\" yaml:\"active_anon,omitempty\""; TotalActiveAnon uint64 "json:\"total_active_anon,omitempty\" yaml:\"total_active_anon,omitempty\""; TotalPgpgout uint64 "json:\"total_pgpgout,omitempty\" yaml:\"total_pgpgout,omitempty\""; TotalCache uint64 "json:\"total_cache,omitempty\" yaml:\"total_cache,omitempty\""; InactiveAnon uint64 "json:\"inactive_anon,omitempty\" yaml:\"inactive_anon,omitempty\""; ActiveFile uint64 "json:\"active_file,omitempty\" yaml:\"active_file,omitempty\""; Pgfault uint64 "json:\"pgfault,omitempty\" yaml:\"pgfault,omitempty\""; InactiveFile uint64 "json:\"inactive_file,omitempty\" yaml:\"inactive_file,omitempty\""; TotalPgpgin uint64 "json:\"total_pgpgin,omitempty\" yaml:\"total_pgpgin,omitempty\"" } "json:\"stats,omitempty\" yaml:\"stats,omitempty\""; MaxUsage uint64 "json:\"max_usage,omitempty\" yaml:\"max_usage,omitempty\""; Usage uint64 "json:\"usage,omitempty\" yaml:\"usage,omitempty\""; Failcnt uint64 "json:\"failcnt,omitempty\" yaml:\"failcnt,omitempty\""; Limit uint64 "json:\"limit,omitempty\" yaml:\"limit,omitempty\"" } "json:\"memory_stats,omitempty\" yaml:\"memory_stats,omitempty\""; BlkioStats struct { IOServiceBytesRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_service_bytes_recursive,omitempty\" yaml:\"io_service_bytes_recursive,omitempty\""; IOServicedRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_serviced_recursive,omitempty\" 
yaml:\"io_serviced_recursive,omitempty\""; IOQueueRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_queue_recursive,omitempty\" yaml:\"io_queue_recursive,omitempty\""; IOServiceTimeRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_service_time_recursive,omitempty\" yaml:\"io_service_time_recursive,omitempty\""; IOWaitTimeRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_wait_time_recursive,omitempty\" yaml:\"io_wait_time_recursive,omitempty\""; IOMergedRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_merged_recursive,omitempty\" yaml:\"io_merged_recursive,omitempty\""; IOTimeRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"io_time_recursive,omitempty\" yaml:\"io_time_recursive,omitempty\""; SectorsRecursive []@"github.com/fsouza/go-dockerclient".BlkioStatsEntry "json:\"sectors_recursive,omitempty\" yaml:\"sectors_recursive,omitempty\"" } "json:\"blkio_stats,omitempty\" yaml:\"blkio_stats,omitempty\""; CPUStats @"github.com/fsouza/go-dockerclient".CPUStats "json:\"cpu_stats,omitempty\" yaml:\"cpu_stats,omitempty\""; PreCPUStats @"github.com/fsouza/go-dockerclient".CPUStats "json:\"precpu_stats,omitempty\"" } + type @"github.com/fsouza/go-dockerclient".APIEvents struct { Status string "json:\"Status,omitempty\" yaml:\"Status,omitempty\""; ID string "json:\"ID,omitempty\" yaml:\"ID,omitempty\""; From string "json:\"From,omitempty\" yaml:\"From,omitempty\""; Time int64 "json:\"Time,omitempty\" yaml:\"Time,omitempty\"" } + type @"".DockerServer struct { @"".containers []*@"github.com/fsouza/go-dockerclient".Container; @"".execs []*@"github.com/fsouza/go-dockerclient".ExecInspect; @"".execMut @"sync".RWMutex; @"".cMut @"sync".RWMutex; @"".images []@"github.com/fsouza/go-dockerclient".Image; @"".iMut @"sync".RWMutex; @"".imgIDs map[string]string; @"".networks []*@"github.com/fsouza/go-dockerclient".Network; @"".netMut @"sync".RWMutex; 
@"".listener @"net".Listener; @"".mux *@"github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux".Router; @"".hook func(? *@"net/http".Request); @"".failures map[string]string; @"".multiFailures []map[string]string; @"".execCallbacks map[string]func(); @"".statsCallbacks map[string]func(? string) (? @"github.com/fsouza/go-dockerclient".Stats); @"".customHandlers map[string]@"net/http".Handler; @"".handlerMutex @"sync".RWMutex; @"".cChan chan<- *@"github.com/fsouza/go-dockerclient".Container } + func (@"".s·1 *@"".DockerServer) CustomHandler (@"".path·2 string, @"".handler·3 @"net/http".Handler) + func (@"".s·2 *@"".DockerServer "esc:0x1") DefaultHandler () (? @"net/http".Handler) { return @"".s·2.@"".mux } + func (@"".s·2 *@"".DockerServer "esc:0x0") MutateContainer (@"".id·3 string "esc:0x0", @"".state·4 @"github.com/fsouza/go-dockerclient".State) (? error) + func (@"".s·1 *@"".DockerServer "esc:0x0") PrepareExec (@"".id·2 string, @"".callback·3 func()) { @"".s·1.@"".execCallbacks[@"".id·2] = @"".callback·3 } + func (@"".s·1 *@"".DockerServer "esc:0x0") PrepareFailure (@"".id·2 string, @"".urlRegexp·3 string) { @"".s·1.@"".failures[@"".id·2] = @"".urlRegexp·3 } + func (@"".s·1 *@"".DockerServer) PrepareMultiFailures (@"".id·2 string, @"".urlRegexp·3 string) { @"".s·1.@"".multiFailures = append(@"".s·1.@"".multiFailures, (map[string]string{ "error":@"".id·2, "url":@"".urlRegexp·3 })) } + func (@"".s·1 *@"".DockerServer "esc:0x0") PrepareStats (@"".id·2 string, @"".callback·3 func(? string) (? 
@"github.com/fsouza/go-dockerclient".Stats)) { @"".s·1.@"".statsCallbacks[@"".id·2] = @"".callback·3 } + func (@"".s·1 *@"".DockerServer "esc:0x0") ResetFailure (@"".id·2 string "esc:0x0") { delete(@"".s·1.@"".failures, @"".id·2) } + func (@"".s·1 *@"".DockerServer "esc:0x0") ResetMultiFailures () { @"".s·1.@"".multiFailures = ([]map[string]string{ }) } + func (@"".s·1 *@"".DockerServer) ServeHTTP (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer "esc:0x0") SetHook (@"".hook·2 func(? *@"net/http".Request)) { @"".s·1.@"".hook = @"".hook·2 } + func (@"".s·1 *@"".DockerServer) Stop () + func (@"".s·2 *@"".DockerServer) URL () (? string) + func (@"".s·1 *@"".DockerServer) @"".attachContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".buildImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".buildMuxer () + func (@"".s·1 *@"".DockerServer) @"".commitContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".createContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".createExecContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".createNetwork (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·4 *@"".DockerServer) @"".findContainer (@"".idOrName·5 string "esc:0x0") (? *@"github.com/fsouza/go-dockerclient".Container, ? int, ? error) + func (@"".s·3 *@"".DockerServer) @"".findImage (@"".id·4 string "esc:0x0") (? string, ? error) + func (@"".s·4 *@"".DockerServer) @"".findImageByID (@"".id·5 string "esc:0x0") (? string, ? int, ? error) + func (@"".s·4 *@"".DockerServer) @"".findNetwork (@"".idOrName·5 string "esc:0x0") (? *@"github.com/fsouza/go-dockerclient".Network, ? int, ? 
error) + func (@"".s·2 *@"".DockerServer "esc:0x0") @"".generateEvent () (? *@"github.com/fsouza/go-dockerclient".APIEvents) + func (@"".s·2 *@"".DockerServer "esc:0x0") @"".generateID () (? string) + func (@"".s·3 *@"".DockerServer) @"".getExec (@"".id·4 string "esc:0x0") (? *@"github.com/fsouza/go-dockerclient".ExecInspect, ? error) + func (@"".s·1 *@"".DockerServer "esc:0x0") @"".getImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·2 *@"".DockerServer) @"".handlerWrapper (@"".f·3 func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) (? func(? @"net/http".ResponseWriter, ? *@"net/http".Request)) + func (@"".s·1 *@"".DockerServer) @"".inspectContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".inspectExecContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".inspectImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".listContainers (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer "esc:0x0") @"".listEvents (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".listImages (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".listNetworks (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer "esc:0x0") @"".loadImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".networkInfo (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer "esc:0x0") @"".notify (@"".container·2 *@"github.com/fsouza/go-dockerclient".Container) { if @"".s·1.@"".cChan != nil { 
@"".s·1.@"".cChan <- @"".container·2 } } + func (@"".s·1 *@"".DockerServer) @"".pauseContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer "esc:0x0") @"".pingDocker (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".pullImage (@"".w·2 @"net/http".ResponseWriter "esc:0x0", @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".pushImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".removeContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".removeImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".renameContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".resizeExecContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".startContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".startExecContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".statsContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".stopContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".tagImage (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request) + func (@"".s·1 *@"".DockerServer) @"".topContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".unpauseContainer (@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func (@"".s·1 *@"".DockerServer) @"".waitContainer 
(@"".w·2 @"net/http".ResponseWriter, @"".r·3 *@"net/http".Request "esc:0x0") + func @"".NewServer (@"".bind·3 string, @"".containerChan·4 chan<- *@"github.com/fsouza/go-dockerclient".Container, @"".hook·5 func(? *@"net/http".Request)) (? *@"".DockerServer, ? error) + func @"".init () + var @"time".months [12]string + var @"time".days [7]string + var @"time".Local *@"time".Location + var @"time".UTC *@"time".Location + type @"sync".rlocker struct { @"sync".w @"sync".Mutex; @"sync".writerSem uint32; @"sync".readerSem uint32; @"sync".readerCount int32; @"sync".readerWait int32 } + func (@"sync".r·1 *@"sync".rlocker) Lock () + func (@"sync".r·1 *@"sync".rlocker) Unlock () + var @"bufio".ErrInvalidUnreadRune error + var @"regexp/syntax".instOpNames []string + +$$ +_go_.6 0 0 0 644 429450 ` +go object darwin amd64 go1.4.2 X:precisestack + +! +go13ldarchive/tar.acrypto/rand.aencoding/json.aerrors.a +fmt.amath/rand.a +net.anet/http.aregexp.astrconv.astrings.a sync.a time.aFgithub.com/fsouza/go-dockerclient.a¢github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.a†github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.aþ"".NewServer  ˜ eH‹ %H„$ þÿÿH;AwèëâHìàHÇ„$HÇ„$HH,$H‰ïH‰ÞH¥H¥H‹œ$èH‰\$H‹œ$ðH‰\$èH‹\$ H‰\$pH‹\$(H‰\$xH‹D$0H‹L$8H‰Œ$ˆHƒøH‰„$€t$HÇ„$H‰„$H‰Œ$HÄàÃHH‰$èH‹\$H‰\$hHH‰$HÇD$èH‹\$H‰\$`HH‰$HÇD$èH‹\$H‰\$XHH‰$HÇD$èH‹\$H‰\$PHH‰$HÇD$èH‹\$H‰\$HHH‰$HÇD$èH‹D$HH¬$ H‰ïH‰ÞèH‹\$pH‰œ$hH‹\$xH‰œ$pH‹\$`H‰œ$0H‹œ$H‰œ$€H‹\$XH‰œ$ˆH‹\$PH‰œ$¨H‹\$HH‰œ$°H‰„$¸H‹œ$øH‰œ$ØHH‰$H‹\$hH‰\$Hœ$ H‰\$èH‹\$hH‰$èH‹\$hH‰\$@H‹1íH9ètpH‹\$pH‰$H‹\$xH‰\$H‹L$@H‰„$H‰D$H‰Œ$˜H‰L$H Qj0èYYH‹\$hH‰œ$HÇ„$HÇ„$HÄàÃHH‰$HH‰\$HH‰\$èH‹D$é^ÿÿÿ8 +00runtime.morestack_noctxt€go.string."tcp"Únet.Listen˜(type."".DockerServerª"runtime.newobjectÌ,type.map[string]stringðruntime.makemap’,type.map[string]string¶runtime.makemapØ,type.map[string]func()üruntime.makemapžˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.StatsÂruntime.makemapä@type.map[string]net/http.Handlerˆruntime.makemap 
""".statictmp_0019Æ  runtime.duffcopyÀ(type."".DockerServer€ .runtime.writebarrierfatœ :"".(*DockerServer).buildMuxer¾ Rgo.itab.*"".DockerServer.net/http.Handler¾ +"net/http.Serve·fÎ +runtime.newprocº *type.*"".DockerServerÐ *type.net/http.Handlerè Rgo.itab.*"".DockerServer.net/http.Handlerü  runtime.typ2ItabpÀ"".autotmp_0018¿*type.*"".DockerServer"".autotmp_0017ÿ(type."".DockerServer"".autotmp_0015¯ˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.Stats"".autotmp_0014Ÿ,type.map[string]func()"".autotmp_0013,type.map[string]string"".autotmp_0012ÿ,type.map[string]string"".&serverï*type.*"".DockerServer "".err¿type.error"".listenerß"type.net.Listener "".~r4Ptype.error "".~r3@*type.*"".DockerServer"".hook08type.func(*net/http.Request) "".containerChan ptype.chan<- *github.com/fsouza/go-dockerclient.Container"".bindtype.string8%À£¿ÀÛ,¿À6B=Z$ #### ·[-64lh#####¼ YWTgclocals·23366670ca46633c69280dc1e0c74865Tgclocals·84a0367b0d2b39a68501b8591f74d154ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ2"".(*DockerServer).notifyàÄeH‹ %H;awèëêHƒì H‹D$(H‹˜81íH9ët0H‹\$0H‰\$HH‰$H‹¨8H‰l$H\$H‰\$èHƒÄ à + 0runtime.morestack_noctxttptype.chan<- *github.com/fsouza/go-dockerclient.Container²"runtime.chansend1 @"".autotmp_0023btype.*github.com/fsouza/go-dockerclient.Container"".containerbtype.*github.com/fsouza/go-dockerclient.Container"".s*type.*"".DockerServer@G?p¸0 +XTgclocals·9d97800b9eac7aaad25644c1094f6baaTgclocals·e1ae6533a9e39048ba0735a2264ce16aü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).buildMuxer §„§eH‹ %H;awèëêHƒìhHH‰$HÇD$èH‹\$H‰\$HHH‰$èH‹|$H‰ùHƒÿ„\)1ÀèH‰L$0H‰ $Hƒ<$„5)Hƒ$8H‹\$HH‰\$èH‹D$01í@ˆhAH‹\$pH‰$Hƒ<$„ö(H$ØH‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„(H-H‰øH‰îH¥H¥Hƒø„m(H‹\$@HÇÂHÇÁH‰$H‰D$PH‰D$H‰T$XH‰T$H‰L$`H‰L$èH‹\$ 
H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ê'Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„Q'H-H‰øH‰îH¥H¥Hƒø„/'H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„¬&Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„&H-H‰øH‰îH¥H¥Hƒø„ñ%H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„n%Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„Õ$H-H‰øH‰îH¥H¥Hƒø„³$H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„0$Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„—#H-H‰øH‰îH¥H¥Hƒø„u#H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ò"Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„Y"H-H‰øH‰îH¥H¥Hƒø„7"H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„´!Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„!H-H‰øH‰îH¥H¥Hƒø„ù H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„v Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„ÝH-H‰øH‰îH¥H¥Hƒø„»H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„8Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„ŸH-H‰øH‰îH¥H¥Hƒø„}H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„úHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„aH-H‰øH‰îH¥H¥Hƒø„?H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„¼Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„#H-H‰øH‰îH¥H¥Hƒø„H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ 
H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„~Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„åH-H‰øH‰îH¥H¥Hƒø„ÃH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„@Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„§H-H‰øH‰îH¥H¥Hƒø„…H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„iH-H‰øH‰îH¥H¥Hƒø„GH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ÄHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„+H-H‰øH‰îH¥H¥Hƒø„ H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„†Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„íH-H‰øH‰îH¥H¥Hƒø„ËH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„HHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„¯H-H‰øH‰îH¥H¥Hƒø„H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ +Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„qH-H‰øH‰îH¥H¥Hƒø„OH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ÌHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„3H-H‰øH‰îH¥H¥Hƒø„H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ŽHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„õH-H‰øH‰îH¥H¥Hƒø„ÓH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„PHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„·H-H‰øH‰îH¥H¥Hƒø„•H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ 
H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„yH-H‰øH‰îH¥H¥Hƒø„WH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„Ô Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„; H-H‰øH‰îH¥H¥Hƒø„ H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„– Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„ý H-H‰øH‰îH¥H¥Hƒø„Û H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„X Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„¿ +H-H‰øH‰îH¥H¥Hƒø„ +H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ +Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„ H-H‰øH‰îH¥H¥Hƒø„_ H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„ÜHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„CH-H‰øH‰îH¥H¥Hƒø„!H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„žHƒ$H‹\$pH‰\$èH‹\$8H‰$H‹\$(H‰\$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„H-H‰øH‰îH¥H¥Hƒø„ûH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„xHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„ßH-H‰øH‰îH¥H¥Hƒø„½H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„:Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„¡H-H‰øH‰îH¥H¥Hƒø„H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„üHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„cH-H‰øH‰îH¥H¥Hƒø„AH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ 
H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„¾Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„%H-H‰øH‰îH¥H¥Hƒø„H‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$„€Hƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èH‹t$pH‹¾ØH‰<$H5H|$H¥H¥èH‹\$H‰\$@HH‰$èH‹|$Hƒÿ„çH-H‰øH‰îH¥H¥Hƒø„ÅH‹\$@HÇÁHÇÂH‰$H‰D$PH‰D$H‰L$XH‰L$H‰T$`H‰T$èH‹\$ H‰\$8HH‰$èH‹D$H-H‰(H‰D$(H‰$Hƒ<$tIHƒ$H‹\$pH‰\$èH‹\$pH‰$H‹\$(H‰\$èH‹D$H‹\$8H‰$H‰D$èHƒÄhÉ%뮉é4ÿÿÿ‰éÿÿÿ‰%étþÿÿ‰éöýÿÿ‰éÔýÿÿ‰%é6ýÿÿ‰é¸üÿÿ‰é–üÿÿ‰%éøûÿÿ‰ézûÿÿ‰éXûÿÿ‰%éºúÿÿ‰é<úÿÿ‰éúÿÿ‰%é|ùÿÿ‰éþøÿÿ‰éÜøÿÿ‰%éVøÿÿ‰éØ÷ÿÿ‰é¶÷ÿÿ‰%é÷ÿÿ‰éšöÿÿ‰éxöÿÿ‰%éÚõÿÿ‰é\õÿÿ‰é:õÿÿ‰%éœôÿÿ‰éôÿÿ‰éüóÿÿ‰%é^óÿÿ‰éàòÿÿ‰é¾òÿÿ‰%é òÿÿ‰é¢ñÿÿ‰é€ñÿÿ‰%éâðÿÿ‰édðÿÿ‰éBðÿÿ‰%é¤ïÿÿ‰é&ïÿÿ‰éïÿÿ‰%éfîÿÿ‰éèíÿÿ‰éÆíÿÿ‰%é(íÿÿ‰éªìÿÿ‰éˆìÿÿ‰%éêëÿÿ‰élëÿÿ‰éJëÿÿ‰%é¬êÿÿ‰é.êÿÿ‰é êÿÿ‰%énéÿÿ‰éðèÿÿ‰éÎèÿÿ‰%é0èÿÿ‰é²çÿÿ‰éçÿÿ‰%éòæÿÿ‰étæÿÿ‰éRæÿÿ‰%é´åÿÿ‰é6åÿÿ‰éåÿÿ‰%éväÿÿ‰éøãÿÿ‰éÖãÿÿ‰%é8ãÿÿ‰éºâÿÿ‰é˜âÿÿ‰%éúáÿÿ‰é|áÿÿ‰éZáÿÿ‰%é¼àÿÿ‰é>àÿÿ‰éàÿÿ‰%é~ßÿÿ‰éßÿÿ‰éÞÞÿÿ‰%é@Þÿÿ‰éÂÝÿÿ‰é Ýÿÿ‰%éÝÿÿ‰é„Üÿÿ‰ébÜÿÿ‰%éÄÛÿÿ‰éFÛÿÿ‰é$Ûÿÿ‰%é†Úÿÿ‰éÚÿÿ‰éæÙÿÿ‰%éHÙÿÿ‰éÊØÿÿ‰é¨Øÿÿ‰%é +Øÿÿ‰éŒ×ÿÿ‰éj×ÿÿ‰%éþÖÿÿ‰%é¿Öÿÿ‰éÖÿÿ¨ + 0runtime.morestack_noctxt:°type.map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route^runtime.makemap€štype.github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router’"runtime.newobjectÄÜ runtime.duffzero”.runtime.writebarrierptrö.runtime.writebarrierptr¤&go.string."/commit"À github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Pathâtype.[1]stringô"runtime.newobject """.statictmp_0126¼¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsÞZtype.struct { F uintptr; R *"".DockerServer }ð"runtime.newobjectˆR"".*DockerServer.("".commitContainer)·fmÞ.runtime.writebarrierptrŽB"".(*DockerServer).handlerWrapper¾¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncì8go.string."/containers/json"ˆ 
github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Pathªtype.[1]string¼"runtime.newobjectè""".statictmp_0130„ +¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods¦ +Ztype.struct { F uintptr; R *"".DockerServer }¸ +"runtime.newobjectÐ +P"".*DockerServer.("".listContainers)·fm¦ .runtime.writebarrierptrÖ B"".(*DockerServer).handlerWrapper† ¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc´ ¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncÌ>>go.string."/containers/{id:.*}"è> github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathŠ?type.[1]stringœ?"runtime.newobjectÈ?""".statictmp_0178ä@¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods†AZtype.struct { F uintptr; R *"".DockerServer }˜A"runtime.newobject°AR"".*DockerServer.("".removeContainer)·fm†B.runtime.writebarrierptr¶BB"".(*DockerServer).handlerWrapperæB¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc”CHgo.string."/containers/{id:.*}/exec"°C github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathÒCtype.[1]stringäC"runtime.newobjectD""".statictmp_0182¬E¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsÎEZtype.struct { F uintptr; R *"".DockerServer }àE"runtime.newobjectøEZ"".*DockerServer.("".createExecContainer)·fmÎF.runtime.writebarrierptrþFB"".(*DockerServer).handlerWrapper®G¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncÜGJgo.string."/containers/{id:.*}/stats"øG github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathšHtype.[1]string¬H"runtime.newobjectØH""".statictmp_0186ôI¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods–JZtype.struct { F uintptr; R *"".DockerServer 
}¨J"runtime.newobjectÀJP"".*DockerServer.("".statsContainer)·fm–K.runtime.writebarrierptrÆKB"".(*DockerServer).handlerWrapperöK¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc¤L@go.string."/exec/{id:.*}/resize"ÀL github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathâLtype.[1]stringôL"runtime.newobject M""".statictmp_0190¼N¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsÞNZtype.struct { F uintptr; R *"".DockerServer }ðN"runtime.newobjectˆOZ"".*DockerServer.("".resizeExecContainer)·fmÞO.runtime.writebarrierptrŽPB"".(*DockerServer).handlerWrapper¾P¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncìP>go.string."/exec/{id:.*}/start"ˆQ github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathªQtype.[1]string¼Q"runtime.newobjectèQ""".statictmp_0194„S¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods¦SZtype.struct { F uintptr; R *"".DockerServer }¸S"runtime.newobjectÐSX"".*DockerServer.("".startExecContainer)·fm¦T.runtime.writebarrierptrÖTB"".(*DockerServer).handlerWrapper†U¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc´Ugo.string."/images/{id:.*}/get"¸‡ github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).PathÚ‡type.[1]stringì‡"runtime.newobject˜ˆ""".statictmp_0242´‰¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsÖ‰Ztype.struct { F uintptr; R *"".DockerServer }è‰"runtime.newobject€ŠD"".*DockerServer.("".getImage)·fmÖŠ.runtime.writebarrierptr†‹B"".(*DockerServer).handlerWrapper¶‹¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncä‹*go.string."/networks"€Œ 
github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Path¢Œtype.[1]string´Œ"runtime.newobjectàŒ""".statictmp_0246ü¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsžŽZtype.struct { F uintptr; R *"".DockerServer }°Ž"runtime.newobjectÈŽL"".*DockerServer.("".listNetworks)·fmž.runtime.writebarrierptrÎB"".(*DockerServer).handlerWrapperþ¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc¬:go.string."/networks/{id:.*}"È github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Pathêtype.[1]stringü"runtime.newobject¨‘""".statictmp_0250Ä’¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methodsæ’Ztype.struct { F uintptr; R *"".DockerServer }ø’"runtime.newobject“J"".*DockerServer.("".networkInfo)·fmæ“.runtime.writebarrierptr–”B"".(*DockerServer).handlerWrapperÆ”¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncô”*go.string."/networks"• github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Path²•type.[1]stringÄ•"runtime.newobjectð•""".statictmp_0254Œ—¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods®—Ztype.struct { F uintptr; R *"".DockerServer }À—"runtime.newobjectØ—N"".*DockerServer.("".createNetwork)·fm¦˜.runtime.writebarrierptrÖ˜B"".(*DockerServer).handlerWrapper†™¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncÐŽ"".autotmp_0256\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0255type.*[1]string"".autotmp_0253type.[]string"".autotmp_0252\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0251type.*[1]string"".autotmp_0249type.[]string"".autotmp_0248\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0247type.*[1]string"".autotmp_0245type.[]string"".autotmp_0244\type.*struct { F uintptr; R *"".DockerServer 
}"".autotmp_0243type.*[1]string"".autotmp_0241type.[]string"".autotmp_0240\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0239type.*[1]string"".autotmp_0237type.[]string"".autotmp_0236\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0235type.*[1]string"".autotmp_0233type.[]string"".autotmp_0232\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0231type.*[1]string"".autotmp_0229type.[]string"".autotmp_0228\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0227type.*[1]string"".autotmp_0225type.[]string"".autotmp_0224\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0223type.*[1]string"".autotmp_0221type.[]string"".autotmp_0220\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0219type.*[1]string"".autotmp_0217type.[]string"".autotmp_0216\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0215type.*[1]string"".autotmp_0213type.[]string"".autotmp_0212\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0211type.*[1]string"".autotmp_0209type.[]string"".autotmp_0208\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0207type.*[1]string"".autotmp_0205type.[]string"".autotmp_0204\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0203type.*[1]string"".autotmp_0201type.[]string"".autotmp_0200\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0199type.*[1]string"".autotmp_0197type.[]string"".autotmp_0196\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0195type.*[1]string"".autotmp_0193type.[]string"".autotmp_0192\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0191type.*[1]string"".autotmp_0189type.[]string"".autotmp_0188\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0187type.*[1]string"".autotmp_0185type.[]string"".autotmp_0184\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0183type.*[1]string"".autotmp_0181type.[]string"".autotmp_0180\type.*struct { F uintptr; R *"".DockerServer 
}"".autotmp_0179type.*[1]string"".autotmp_0177type.[]string"".autotmp_0176\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0175type.*[1]string"".autotmp_0173type.[]string"".autotmp_0172\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0171type.*[1]string"".autotmp_0169type.[]string"".autotmp_0168\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0167type.*[1]string"".autotmp_0165type.[]string"".autotmp_0164\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0163type.*[1]string"".autotmp_0161type.[]string"".autotmp_0160\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0159type.*[1]string"".autotmp_0157type.[]string"".autotmp_0156\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0155type.*[1]string"".autotmp_0153type.[]string"".autotmp_0152\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0151type.*[1]string"".autotmp_0149type.[]string"".autotmp_0148\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0147type.*[1]string"".autotmp_0145type.[]string"".autotmp_0144\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0143type.*[1]string"".autotmp_0141type.[]string"".autotmp_0140\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0139type.*[1]string"".autotmp_0137type.[]string"".autotmp_0136\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0135type.*[1]string"".autotmp_0133type.[]string"".autotmp_0132\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0131type.*[1]string"".autotmp_0129type.[]string"".autotmp_0128\type.*struct { F uintptr; R *"".DockerServer }"".autotmp_0125/type.[]string"".autotmp_0124oœtype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router"".autotmp_0123œtype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router"".autotmp_0122jtype.func(net/http.ResponseWriter, 
*net/http.Request)"".autotmp_0121štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0120štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0119jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0118štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0117štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0116jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0115štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0114štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0113jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0112štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0111štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0110jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0109štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0108štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0107jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0106štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0105štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0104štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0103štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0102jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0101štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0100štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0099jtype.func(net/http.ResponseWriter, 
*net/http.Request)"".autotmp_0098štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0097štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0096jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0095štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0094štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0093jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0092štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0091štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0090jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0089štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0088štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0087jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0086štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0085štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0084jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0083štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0082štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0081jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0080štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0079štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0078jtype.func(net/http.ResponseWriter, 
*net/http.Request)"".autotmp_0077štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0076štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0075jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0074štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0073štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0072jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0071štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0070štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0069jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0068štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0067štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0066jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0065štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0064štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0063jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0062štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0061štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0060jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0059štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0058štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0057jtype.func(net/http.ResponseWriter, 
*net/http.Request)"".autotmp_0056štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0055štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0054jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0053štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0052štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0051jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0050štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0049štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0048jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0047štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0046štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0045jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0044štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0043štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0042jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0041štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0040štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0039jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0038štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0037štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0036jtype.func(net/http.ResponseWriter, 
*net/http.Request)"".autotmp_0035štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0034štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0033jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0032štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0031štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0030jtype.func(net/http.ResponseWriter, *net/http.Request)"".autotmp_0029štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0028štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0026_štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0025Oštype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".autotmp_0024?°type.map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route"".s*type.*"".DockerServerбLÏЄÐSàÄ¥¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤Œ¤¤¤¤¤ -¬.A1?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7?d7 ?d7?d7?d7?d7?d7?d3ŽTgclocals·2c09ec81c5cb12328d7183f25bc48833Tgclocals·76225bbef6ae6e9e5960f6f7925b8185ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ4"".(*DockerServer).SetHook žeH‹ %H;awèëêHƒìH‹\$H‰$Hƒ<$tH$àH‹\$ H‰\$èHƒÄÉ%ëÛ + 0runtime.morestack_noctxtz.runtime.writebarrierptr "".hook8type.func(*net/http.Request)"".s*type.*"".DockerServer + +P”' + +<Tgclocals·e8c55b930b09fa5028b5e4b78b8932dcTgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ<"".(*DockerServer).PrepareExec€äeH‹ %H;awèëêHƒì8H‹\$HH‰\$(H‹\$PH‰\$0H‹\$XH‰\$ HH‰$H‹\$@H‹«H‰l$H\$(H‰\$H\$ H‰\$èHƒÄ8à + 
0runtime.morestack_noctxtv,type.map[string]func()Ò$runtime.mapassign1@p +"".autotmp_0357/type.func()"".autotmp_0356type.string"".callback0type.func() +"".idtype.string"".s*type.*"".DockerServerpWo€ÄS +hTgclocals·5197b04b6fafdc0c7d1822cc34066683Tgclocals·31214a5fe2ac06a8b2e85038c37289d6ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ>"".(*DockerServer).PrepareStats€äeH‹ %H;awèëêHƒì8H‹\$HH‰\$(H‹\$PH‰\$0H‹\$XH‰\$ HH‰$H‹\$@H‹«H‰l$H\$(H‰\$H\$ H‰\$èHƒÄ8à + 0runtime.morestack_noctxtvˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.StatsÒ$runtime.mapassign1@p +"".autotmp_0359/rtype.func(string) github.com/fsouza/go-dockerclient.Stats"".autotmp_0358type.string"".callback0rtype.func(string) github.com/fsouza/go-dockerclient.Stats +"".idtype.string"".s*type.*"".DockerServerpWo€ÖS +hTgclocals·5197b04b6fafdc0c7d1822cc34066683Tgclocals·31214a5fe2ac06a8b2e85038c37289d6ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).PrepareFailure€øeH‹ %H;awèëêHƒì@H‹\$PH‰\$0H‹\$XH‰\$8H‹\$`H‰\$ H‹\$hH‰\$(HH‰$H‹\$HH‹«èH‰l$H\$0H‰\$H\$ H‰\$èHƒÄ@à + 0runtime.morestack_noctxtŠ,type.map[string]stringæ$runtime.mapassign1P€ +"".autotmp_0361?type.string"".autotmp_0360type.string"".urlRegexp0type.string +"".idtype.string"".s*type.*"".DockerServer€a€â]  +rTgclocals·1765c43755fbf91dfae87195c1ec24fbTgclocals·f29b89ce4cd57d8100665fbda8fdf405ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþN"".(*DockerServer).PrepareMultiFailuresàÖeH‹ %HD$èH;AwèëåHì˜HH‰$HÇD$èH‹D$HH‹+H‰l$XH‹kH‰l$`H‹œ$¨H‰\$HH‹œ$°H‰\$PHH‰$H‰D$@H‰D$H\$XH‰\$H\$HH‰\$èHH‹+H‰l$XH‹kH‰l$`H‹œ$¸H‰\$HH‹œ$ÀH‰\$PHH‰$H‹\$@H‰\$H\$XH‰\$H\$HH‰\$èH‹œ$ Hƒû„H‹“ðH‹‹øH‹›H‰”$€H‰Œ$ˆH‰œ$H‰ØH)ËHƒû}FHH‰$H‰T$hH‰T$H‰L$pH‰L$H‰D$xH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰t$pH‰D$xH‰T$hHÊH‰$H‹\$@H‰\$èH‹T$hH‹L$pH‹D$xH‹œ$ 
H‰$Hƒ<$t"".(*DockerServer).ResetFailureÀ¼eH‹ %H;awèëêHƒì(H‹\$8H‰\$H‹\$@H‰\$ HH‰$H‹\$0H‹«èH‰l$H\$H‰\$èHƒÄ(à + 0runtime.morestack_noctxtb,type.map[string]stringª"runtime.mapdelete0P"".autotmp_0375type.string +"".idtype.string"".s*type.*"".DockerServerPCO`ø? +T Tgclocals·bd51743682bd6c0f7b9f2e8e6dffed99Tgclocals·8d600a433c6aaa81a4fe446d95c5546bü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþJ"".(*DockerServer).ResetMultiFailures ŽeH‹ %H;awèëêHƒì8HH‰$èH‹l$HƒýtMH‹\$@1Ò1ÉH‰$Hƒ<$t0H$ðH‰l$ H‰l$H‰T$(H‰T$H‰L$0H‰L$èHƒÄ8É%ëljEë® + + 0runtime.morestack_noctxt:2type.[0]map[string]stringL"runtime.newobjectà2runtime.writebarrierslicep"".autotmp_0376/0type.[]map[string]string"".s*type.*"".DockerServerp^op‚Z +%kTgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·0528ab8f76149a707fd2f0025c2178a3ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).CustomHandler žeH‹ %H;awèëêHƒì@H‹\$HH‰$Hƒ<$„•H$ èH‹\$PH‰\$0H‹\$XH‰\$8H‹\$`H‰\$ H‹\$hH‰\$(HH‰$H‹\$HH‹«H‰l$H\$0H‰\$H\$ H‰\$èH‹\$HH‰$Hƒ<$tH$ èHƒÄ@É%ëå‰%é_ÿÿÿ + 0runtime.morestack_noctxtn(sync.(*RWMutex).LockÌ@type.map[string]net/http.Handler¨$runtime.mapassign1â,sync.(*RWMutex).UnlockP€ +"".autotmp_0382?*type.net/http.Handler"".autotmp_0381type.string"".handler0*type.net/http.Handler"".pathtype.string"".s*type.*"".DockerServer€Ÿ€Ð ˜!]  
6] Tgclocals·925be0824eaf197a56a5d7050bf29309Tgclocals·85223f890d4c8f80203775beed82eaddü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).MutateContainer€ +ä eH‹ %HD$H;AwèëåHìðL‹„$HÇ„$hHÇ„$pH‹œ$øHƒû„H‹H‹CH‹kH‰¬$1öH‰„$ˆH‰D$(H‰”$€H‹l$(H9îH‰T$HH‹H‰t$0H‰\$8Hƒû„¼H‹;H‰|$pH‹CH‰D$xL9À…µH‰<$H‰D$H‹¬$H‰l$L‰D$èL‹„$H‹t$0H‹T$H¶\$ €ûtyHœ$H¬$˜H‰ïH‰ÞèHH‰$H‹\$8H‰\$Hƒ|$t8HƒD$XHœ$˜H‰\$èHÇ„$hHÇ„$pHÄðÉ%ë¿HƒÂHÿÆH‹l$(H9îŒÿÿÿHH‹+H‰l$`H‹kH‰l$hHÇD$PHÇD$XHH‰$èH‹D$H‰D$@H‰$Hƒ<$t}H‹\$`H‰\$H‹\$hH‰\$èH‹\$@H‰\$@H‹1íH9ètH‹L$@H‰„$hH‰Œ$pHÄðÃHH‰$HH‰\$HH‰\$èH‹D$ë´‰%éwÿÿÿ‰é=þÿÿ‰éâýÿÿ +*0runtime.morestack_noctxt¼ runtime.eqstringªÌ runtime.duffcopy¸Xtype.github.com/fsouza/go-dockerclient.State”.runtime.writebarrierfatž>go.string."container not found"ò.type.errors.errorString„"runtime.newobjectà4runtime.writebarrierstring‚Bgo.itab.*errors.errorString.errorØ0type.*errors.errorStringîtype.error† Bgo.itab.*errors.errorString.errorš  runtime.typ2Itab€à"".autotmp_0391ß0type.*errors.errorString"".autotmp_0390ÿtype.string"".autotmp_0388Ïdtype.**github.com/fsouza/go-dockerclient.Container"".autotmp_0387type.int"".autotmp_0386ÿtype.int"".autotmp_03850type.*errors.errorString"".autotmp_0384¯Xtype.github.com/fsouza/go-dockerclient.State"".autotmp_0383ßftype.[]*github.com/fsouza/go-dockerclient.Container "".~r0¿type.errorerrors.text·2Ÿtype.string"".containerïbtype.*github.com/fsouza/go-dockerclient.Container "".~r2àtype.error"".state0Xtype.github.com/fsouza/go-dockerclient.State +"".idtype.string"".s*type.*"".DockerServer("àËßàºßàW€.¨BWeP   Ø Ýlx.‘Tgclocals·1e2d550ac4f017d716d87ff44946577fTgclocals·0e8ff9f111235a6bccca3fa33f624774ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ."".(*DockerServer).Stop  eH‹ %H;awèëêHƒì(H‹D$0Hƒ¸Èt"H‹ˆÈH‹¨ÐH‰l$ H‰,$H‰L$H‹Y0ÿÓHƒÄ(à + 0runtime.morestack_noctxt’ +P"".s*type.*"".DockerServerP5OP¾ +" 
+ITgclocals·519efd86263089ddb84df3cfe7fd2992Tgclocals·d64e51a4c4bfeaa840e480961ec6b0b3ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ,"".(*DockerServer).URL –eH‹ %H;awèëêHƒìpH‹D$xHÇ„$€HÇ„$ˆHƒ¸ÈuHÇ„$€HÇ„$ˆHƒÄpÃH‹ˆÈH‹¨ÐH‰l$HH‰,$H‰L$@H‹Y(ÿÓH‹L$H‹D$H‰D$hH‰$H‰L$`H‹Y(ÿÓH‹L$H‹D$HH,$H‰ïH‰ÞH¥H¥H‰L$PH‰L$H‰D$XH‰D$HHl$ H‰ïH‰ÞH¥H¥èH‹\$0H‰œ$€H‹\$8H‰œ$ˆHƒÄpà + 0runtime.morestack_noctxtü +¸ +Ö&go.string."http://"¨go.string."/"Ð*runtime.concatstring30à"".autotmp_0397?type.string"".autotmp_0396type.net.Addr "".~r0type.string"".s*type.*"".DockerServer àCßà¬ßÌ7 +² ~’Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·f883d3996c76325fd1714d4e3de9fa33ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ8"".(*DockerServer).ServeHTTPà Ö eH‹ %HD$ÈH;AwèëåHì¸H‹œ$ÀH‰$Hƒ<$„&H$ èH‹œ$ÀH‰$Hƒ<$„öH$ H QjèYYH…À…ÆH‹œ$ÀH‹«H|$h1ÀèHH‰$H‰l$H\$hH‰\$èH‹\$h1íH9ë„ìH‹\$pHƒû„fH‹;H‹sH‹\$hHƒû„IH‹H‹kH‰|$XH‰|$HH‰t$`H‰t$PH‰T$8H‰$H‰l$@H‰l$H‹´$ØH‹~Hƒÿ„þHw8H|$H¥H¥è¶\$ €ûtIH‹œ$ÈH‰\$H‹œ$ÐH‰\$H‹œ$ØH‰\$H‹\$PH‰$H‹\$HH‹[ ÿÓèHĸÃH\$hH‰$èH‹\$h1íH9ë…ÿÿÿH‹œ$ÀH‹«ØH‰,$H‹œ$ÈH‰\$H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹Œ$ÀH‹™à1íH9ëtH‹œ$ØH‰$H‹‘àH‹ÿÓèHĸÉéûþÿÿ‰é°þÿÿ‰é“þÿÿèHĸÉ%éþýÿÿ‰%éÎýÿÿ +*0runtime.morestack_noctxt„*sync.(*RWMutex).RLockÐ4sync.(*RWMutex).RUnlock·fà"runtime.deferproc¬Ø runtime.duffzeroº@type.map[string]net/http.Handlerê&runtime.mapiterinit€$regexp.MatchStringŽ +–&runtime.deferreturnÂ&runtime.mapiternextàªgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).ServeHTTPÀ +È&runtime.deferreturnŽ &runtime.deferreturn@ð"".autotmp_0401¿*type.net/http.Handler"".autotmp_0400ŸJtype.map.iter[string]net/http.Handler"".handlerß*type.net/http.Handler +"".reÿtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerD"ðK ïð˜ïð"ïððFÜ"$9ŠE; 
GAs‹HhTgclocals·bc335ce91c3a8b5f426dd201465802bdTgclocals·3901c619f635162fa423fe138099ace5ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).DefaultHandler ˆeH‹ %H;awèëêHƒì HÇD$0HÇD$8H‹1íH9ètH‹\$(H‹«ØH‰l$8H‰D$0HƒÄ ÃHH‰$HH‰\$HH‰\$èH‹D$ë¶ + 0runtime.morestack_noctxt^Ägo.itab.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router.net/http.Handler°œtype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouterÆ*type.net/http.HandlerÞÄgo.itab.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router.net/http.Handlerò runtime.typ2Itab0@ "".~r0*type.net/http.Handler"".s*type.*"".DockerServer@:?@; þ,d +xTgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).handlerWrapperàÌeH‹ %H;awèëêHƒì(HH‰$èH‹D$H‰D$H‹l$0H‰(HH‰$èH‹D$H‰D$ H‹l$8H‰(HH‰$èH‹D$H-H‰(H‰D$H‰$Hƒ<$tPHƒ$H‹\$H‰\$èH‹\$H‰$Hƒ<$t#Hƒ$H‹\$ H‰\$èH‹\$H‰\$@HƒÄ(É%ëÔ‰%ë§ + 0runtime.morestack_noctxt:*type.*"".DockerServerL"runtime.newobject~jtype.func(net/http.ResponseWriter, *net/http.Request)"runtime.newobjectÂÊtype.struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }Ô"runtime.newobjectì"".func·001º.runtime.writebarrierptr‚.runtime.writebarrierptr0P"".autotmp_0404/Ìtype.*struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) } +"".&fltype.*func(net/http.ResponseWriter, *net/http.Request) +"".&s,type.**"".DockerServer "".~r1 jtype.func(net/http.ResponseWriter, 
*net/http.Request)P¹OPð†^8’%""3$0Tgclocals·ab01a2d55089ff50c402006df1039c39Tgclocals·be18fcff1e4d1cf801d0b47f660b9806ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).listContainersÀ+¢+eH‹ %H„$ØýÿÿH;AwèëâHì¨1ÀH¼$ èH‹œ$ÈH‹kH‰,$èH‹D$HH‹H‰”$˜H‹KH‰Œ$ HÇD$xHÇ„$€1íH9è…Ž 1É1ÀH‰L$xH‰Œ$H‰„$€H‰„$ H‹œ$°H‰$Hƒ<$„J Hƒ$HèH‹œ$°H‹kHH‰$HÇD$H‰l$èH‹T$H‹L$ H‹D$(H‰”$XH‰Œ$`H‰„$hH‹œ$°Hƒû„ÖH‹3H‹CH‹kH‰¬$à1ÿH‰„$ØH‰D$HH‰´$ÐH‹l$HH9ïYH‰t$pH‹H‰|$PH‰D$`H‹œ$ Hƒû…hH‹¬$H‰,$H‹´$ H‰t$H5LD$L‰ÇH¥H¥èH‹|$PH‹t$pH‹D$`¶\$ €û„Hƒø„ Hh8H$H‰ßH‰îH¥H¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹\$(H‰œ$øH‹\$0H‰œ$H¼$1ÀèHœ$Hƒû„˜HÇÅHÇÂH‰œ$ˆH‰¬$H‰”$˜HH‰$H‹\$`H‰\$Hƒ|$„EHƒD$(èH‹T$H‹D$H‹œ$ˆH‰$H‰”$¸H‰T$H‰„$ÀH‰D$èHH‰$Hœ$øH‰\$èH‹T$H‹D$H‹œ$ˆHƒÃH‰$H‰”$¸H‰T$H‰„$ÀH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹T$`H‹\$(H‰œ$H‹\$0H‰œ$Hƒú„>H‹Z‹j‰¬$HH‹j H‰¬$PH‰œ$@H½ nˆñÿÿÿHëH‰\$@H‰$Hƒ<$„ïHƒ$XèH‹\$H‰œ$èH‹\$H‰œ$ðH‹\$`H‹«ÈH‰,$èH‹\$H‰œ$¸H‹\$H‰œ$ÀH‹\$H‰œ$ÈHœ$ØHÇHÇCHœ$ØHƒû„_HÇÂHÇÅH‰œ$ˆH‰”$H‰¬$˜HH‰$H‹\$`H‰\$Hƒ|$„ HD$ èH‹T$H‹D$H‹œ$ˆH‰$H‰”$¸H‰T$H‰„$ÀH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹D$`H‹\$(H‰œ$ÈH‹t$0H‰´$ÐH5H¬$ H‰ïèHƒø„AH(Hœ$ H‰ßH‰îH¥H¥H¨°Hœ$0H‰ßH‰îH¥H¥H‹œ$H‰œ$@H‹œ$H‰œ$HH‹\$@H‰œ$PH‹œ$èH‰œ$XH‹œ$ðH‰œ$`H‹œ$¸H‰œ$hH‹œ$ÀH‰œ$pH‹œ$ÈH‰œ$xHH‰$èH‹T$Hƒú„pHÇÁHÇÅH‰”$H‰Œ$˜H‰¬$ H‹„$H‹œ$˜H‰œ$xH‹œ$ H‰œ$€H‰„$pH‰$H‹œ$ÈH‰\$H‹œ$ÐH‰\$èH‹”$XH‹Œ$`H‹œ$hH‰ØH)ËHƒû}OHH‰$H‰”$èH‰T$H‰Œ$ðH‰L$H‰„$øH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ðH‰„$øHH‰$H‰ÓH‰”$èH‰ÍHiíˆHëH‰\$Hœ$ H‰\$èH‹|$PH‹t$pH‹”$èH‹Œ$ðH‹„$øH‰”$XH‰Œ$`H‰„$hHƒÆHÿÇH‹l$HH9ùÿÿH‹œ$°H‰$Hƒ<$„ÂHƒ$HèH‹œ$ÀH‰$H‹œ$¸H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$ÀH‰$H‹œ$¸H‹[0ÿÓHH‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹L$H‹D$ H‰Œ$¨H‰Œ$ˆH‰„$°H‰„$HH‰$èH‹|$H‰ùHƒÿ„Ç1ÀèH‰L$hH‰ $Hƒ<$„ H‹œ$ˆH‰\$H‹œ$H‰\$èH‹\$hH‰\$XH‹œ$XH‰œ$ H‹œ$`H‰œ$¨H‹œ$hH‰œ$°HH‰$Hœ$ H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$XH‰$èHĨÉ%éTÿÿÿ‰é2ÿÿÿ‰%é2þÿÿ‰é‰üÿÿ‰é¸ûÿÿ‰%éèúÿÿ‰éšúÿÿ‰%éúÿÿ‰é»ùÿÿ‰%é¯øÿÿ‰éaøÿÿ‰éð÷ÿÿ¶XX€û„«ýÿÿéÔ÷ÿÿ‰é#÷ÿÿ‰%éªöÿÿHÇ„$(HÇ„$0HÇ„$8HH‰$H‰D$H‰”$H‰T$H‰Œ$H‰L$èH‹D$ 
¶\$(HƒøtPH‹0H‰´$(H‹PH‰”$0H‹hH‰¬$8€ûtHƒútHƒúv H‹H‹FéÓõÿÿè 1É1ÀéÃõÿÿ‰ë¬n +00runtime.morestack_noctxt`¼ runtime.duffzeroŠ(net/url.(*URL).Query¢go.string."all"”*sync.(*RWMutex).RLockºltype.[]github.com/fsouza/go-dockerclient.APIContainersè"runtime.makesliceÒgo.string."1"ô runtime.eqstringøgo.string." " strings.Joinòð runtime.duffzeroð type.stringº +runtime.convT2E¤ 2runtime.writebarrieriface² type.stringÞ runtime.convT2EÐ 2runtime.writebarrierifaceÞ "go.string."%s %s"Ò fmt.Sprintf¾bgithub.com/fsouza/go-dockerclient.(*State).Stringœ†github.com/fsouza/go-dockerclient.(*NetworkSettings).PortMappingAPI–type.stringæruntime.convT2EÐ2runtime.writebarrierifaceÞgo.string."/%s"Òfmt.Sprintfž""".statictmp_0435¾¤ runtime.duffcopy¶type.[1]stringÈ"runtime.newobjectØ4runtime.writebarrierstring®ltype.[]github.com/fsouza/go-dockerclient.APIContainers "runtime.growsliceøhtype.github.com/fsouza/go-dockerclient.APIContainersÞ.runtime.writebarrierfat¾.sync.(*RWMutex).RUnlockö +’ 0go.string."Content-Type"¸ 8go.string."application/json"à &net/http.Header.Setª! 
+´!type.io.Writerú!runtime.convI2IÜ"4type.encoding/json.Encoderî""runtime.newobject #ð runtime.duffzero†$2runtime.writebarrierifaceˆ%ltype.[]github.com/fsouza/go-dockerclient.APIContainers´%runtime.convT2Eø%>encoding/json.(*Encoder).Encode‚)&type.net/url.ValuesÒ)4runtime.mapaccess2_faststrü*$runtime.panicindex@Ð +R"".autotmp_0444ÿ6type.*encoding/json.Encoder"".autotmp_04436type.*encoding/json.Encoder"".autotmp_0442ÿtype.io.Writer"".autotmp_0439type.int"".autotmp_0438type.int"".autotmp_0437ltype.[]github.com/fsouza/go-dockerclient.APIContainers"".autotmp_0434htype.github.com/fsouza/go-dockerclient.APIContainers"".autotmp_0433"type.interface {}"".autotmp_0431&type.[]interface {}"".autotmp_0429"type.interface {}"".autotmp_0428ß"type.interface {}"".autotmp_0426¿&type.[]interface {}"".autotmp_0424ïdtype.**github.com/fsouza/go-dockerclient.Container"".autotmp_0423¿ type.int"".autotmp_0422type.int"".autotmp_0420ltype.[]github.com/fsouza/go-dockerclient.APIContainers"".autotmp_0417¿type.string"".autotmp_0416Ÿ(type.[1]interface {}"".autotmp_0415ß`type.[]github.com/fsouza/go-dockerclient.APIPort"".autotmp_0414ÿtype.string"".autotmp_0413type.string"".autotmp_0412ßtype.string"".autotmp_0411Ï(type.[2]interface {}"".autotmp_0410¯ftype.[]*github.com/fsouza/go-dockerclient.Container"".autotmp_0409ÿltype.[]github.com/fsouza/go-dockerclient.APIContainers"".autotmp_0408type.int"".autotmp_0407¯ type.int"".autotmp_0406¿type.string "".~r0Ÿ 6type.*encoding/json.Encoder$encoding/json.w·2¿type.io.Writer "".~r0Ï type.int64time.t·2Ïtype.time.Time "".~r0ßtype.stringnet/url.vs·4ÿtype.[]stringnet/url.key·3Ÿtype.string"".container btype.*github.com/fsouza/go-dockerclient.Container"".resultŸltype.[]github.com/fsouza/go-dockerclient.APIContainers "".allŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer%Ð +âÏ +Ð +Øà†Ä4y!Q\`±>3<“á!Q#©     
ÆdD…ðãÌv/¥vûˆd_pØLW"í!xTgclocals·02bfe185cbfa386cc6696a665007ff28Tgclocals·a74ca190396b92ed76efe93c61653942ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).listImages šeH‹ %H„$PúÿÿH;AwèëâHì0H‹œ$8H‰$Hƒ<$„EHƒ$HèH‹œ$8H‹khHH‰$H‰l$H‰l$èH‹T$H‹L$ H‹D$(H‰”$ØH‰Œ$àH‰„$èH‹œ$8Hƒû„ÕH‹S`H‹KhH‹kpH‰¬$H1ÀH‰Œ$@H‰L$HH‰”$8H‹l$HH9è™H‰T$hHƒú„†H¬$ H‰ïH‰ÖèH‰D$PH‰ÂHœ$ H¬$H‰ïH‰ÞèH‹Œ$@‹œ$H‰œ$ÈH‹œ$PH‰œ$ÐH‰Œ$ÀH½ nˆñÿÿÿHéH¼$ 1ÀèHœ$H¬$ H‰ïH‰ÞH¥H¥H‰Œ$ÈHH‰$H‹œ$ØH‰ÕH‰T$@L‹„$àL9ƒ±HkípHëH‰\$Hœ$ H‰\$èH‹œ$8H‹«H¼$P1ÀèHH‰$H‰l$Hœ$PH‰\$èH‹œ$P1íH9ë„)H‹œ$XHƒû„'H‹H‹CH‹œ$PHƒû„H‹+H‰¬$€H‹kH‰¬$ˆH‰”$H‹´$H‰´$°H‹Œ$H‰„$˜H‰Œ$¸H9È…ŠH‰$H‰D$H‰t$H‰L$è¶\$ €û„dH‹œ$ØH‹l$@L‹„$àL9ŃlHkípHëH‹SH‹KH‹[ H‰”$H‰Œ$H‰œ$H‰ØH)ËHƒû}OHH‰$H‰”$ðH‰T$H‰Œ$øH‰L$H‰„$H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$øH‰„$H‰ÓH‰”$ðH‰ÍHkíHëH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹”$ðH‹Œ$øH‹„$H‹œ$ØH‹l$@L‹„$àL9Ń[HkípHëH‰$Hƒ$H‰”$H‰T$H‰Œ$H‰L$H‰„$H‰D$èHœ$PH‰$èH‹œ$P1íH9ë…×ýÿÿH‹T$hH‹D$PHÂHÿÀH‹l$HH9èŒgüÿÿH‹œ$8H‰$Hƒ<$„¶Hƒ$HèH‹œ$HH‰$H‹œ$@H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$HH‰$H‹œ$@H‹[0ÿÓHH‰$H‹œ$@H‰\$H‹œ$HH‰\$èH‹L$H‹D$ H‰Œ$ H‰L$pH‰„$¨H‰D$xHH‰$èH‹|$H‰ùHƒÿ„Á1ÀèH‰L$`H‰ $Hƒ<$„šH‹\$pH‰\$H‹\$xH‰\$èH‹\$`H‰\$XH‹œ$ØH‰œ$ H‹œ$àH‰œ$(H‹œ$èH‰œ$0HH‰$Hœ$ H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$XH‰$èHÄ0É%éZÿÿÿ‰é8ÿÿÿ‰%é>þÿÿè è ‰éòûÿÿ‰éÒûÿÿè ‰ésúÿÿ‰é$úÿÿ‰%é¯ùÿÿJ +00runtime.morestack_noctxt„*sync.(*RWMutex).RLockªdtype.[]github.com/fsouza/go-dockerclient.APIImagesÐ"runtime.makesliceøÈ runtime.duffcopy¾È runtime.duffcopyÒÈ runtime.duffzero¤`type.github.com/fsouza/go-dockerclient.APIImagesª.runtime.writebarrierfatæØ runtime.duffzeroô,type.map[string]stringª&runtime.mapiterinitö + runtime.eqstringÊ type.[]string¼ "runtime.growsliceö4runtime.writebarrierstringÚ2runtime.writebarriersliceü&runtime.mapiternext¨.sync.(*RWMutex).RUnlockà +ü0go.string."Content-Type"¢8go.string."application/json"Ê&net/http.Header.Set” +žtype.io.Writeräruntime.convI2Iº4type.encoding/json.EncoderÌ"runtime.newobjectþð 
runtime.duffzeroØ2runtime.writebarrierifaceÚdtype.[]github.com/fsouza/go-dockerclient.APIImages†runtime.convT2EÊ>encoding/json.(*Encoder).Encode¢$runtime.panicindex°$runtime.panicindexÚ$runtime.panicindex@à8"".autotmp_0483Ÿ6type.*encoding/json.Encoder"".autotmp_04826type.*encoding/json.Encoder"".autotmp_0481Ÿtype.io.Writer"".autotmp_0476ÿtype.[]string"".autotmp_0475Ïtype.[]string"".autotmp_0474type.string"".autotmp_0473ÿtype.string"".autotmp_0471ŸXtype.github.com/fsouza/go-dockerclient.Image"".autotmp_0470Ztype.*github.com/fsouza/go-dockerclient.Image"".autotmp_0469Ïtype.int"".autotmp_0468type.int"".autotmp_0467Ÿdtype.[]github.com/fsouza/go-dockerclient.APIImages"".autotmp_0465type.[]string"".autotmp_0464¿6type.map.iter[string]string"".autotmp_0462Ÿ`type.github.com/fsouza/go-dockerclient.APIImages"".autotmp_0461ï\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0459¿type.int "".~r0¯6type.*encoding/json.Encoder$encoding/json.w·2ÿtype.io.Writertime.t·2ßtype.time.Time +"".id¿type.string "".tagßtype.string"".image¿Xtype.github.com/fsouza/go-dockerclient.Image"".ißtype.int"".result¯dtype.[]github.com/fsouza/go-dockerclient.APIImages"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer%à‹ ßà_ hò%!M;{Wä$ "!Q# BA“@¦€rgÒFW", $Tgclocals·f7ba1512b6938de3ab7810c798567682Tgclocals·e0b091cc964057ade987c1196ae02e2eü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ8"".(*DockerServer).findImageàÄeH‹ %H;awèëêHƒìpHÇ„$HÇ„$˜HÇ„$ HÇ„$¨H‹\$xH‰$Hƒ<$„xHƒ$xèH‹\$xH‰$Hƒ<$„NHƒ$xH QjèYYH…À…$H‹”$€H‹Œ$ˆHH‰$H‹\$xH‹«H‰l$H‰T$`H‰T$H‰L$hH‰L$èH‹T$ ¶\$(Hƒú„ÄH‹2H‰t$@H‹jH‰l$H€ût3H‰´$H‰¬$˜HÇ„$ HÇ„$¨èHƒÄpÃH‹\$xH‰$H‹œ$€H‰\$H‹œ$ˆH‰\$èH‹t$H‹l$ H‹T$0H‹L$8H‰t$@H‰´$H‰l$HH‰¬$˜H‰T$PH‰”$ H‰L$XH‰Œ$¨èHƒÄpÉé5ÿÿÿèHƒÄpÉ%é¦þÿÿ‰%é|þÿÿ + 
0runtime.morestack_noctxtÈ*sync.(*RWMutex).RLockˆ4sync.(*RWMutex).RUnlock·f˜"runtime.deferprocÜ,type.map[string]string¸4runtime.mapaccess2_faststrè&runtime.deferreturnÂ@"".(*DockerServer).findImageByIDÞ&runtime.deferreturn‚&runtime.deferreturnpà"".autotmp_0487type.string "".err?type.error"".image_type.string "".~r2Ptype.error "".~r10type.string +"".idtype.string"".s*type.*"".DockerServerBàoªßàzßàßà&ð0šJ3j3<? cxW.MCTgclocals·f7cb58e18cf0f9d3ee7dc7385e94aef7Tgclocals·660c52760819425e2fa6ae9a8a8ae931ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).findImageByID€î eH‹ %H„$¸ûÿÿH;AwèëâHìÈHÇ„$èHÇ„$ðHÇ„$øHÇ„$HÇ„$H‹œ$ÐH‰$Hƒ<$„óHƒ$xèH‹œ$ÐH‰$Hƒ<$„ÆHƒ$xH QjèL‹Œ$ðYYH…À…‘H‹œ$ÐHƒû„xL‹C`H‹ChH‹kpH‰¬$ 1ÒH‰„$˜H‰D$0L‰„$L‰ÁH‹l$0H9êL‰D$HIƒø„%H¬$¸H‰ïL‰ÆèH‰T$8H‰T$(Hœ$¸H¬$¨H‰ïH‰ÞèH‹´$¨H‰´$€H‹Œ$°H‰Œ$ˆL9É…H‰4$H‰L$H‹¬$ØH‰l$L‰L$èL‹Œ$àL‹D$HH‹T$8¶\$ €ûtSH‹œ$¨H‰œ$èH‹œ$°H‰œ$ðH‹\$(H‰œ$øHÇ„$HÇ„$èHÄÈÃIÀHÿÂH‹l$0H9êŒéþÿÿHH‹+H‰l$`H‹kH‰l$hHÇD$PHÇD$XHH‰$èH‹L$H‰L$@H‰ $Hƒ<$„¾H‹\$`H‰\$H‹\$hH‰\$èH‹\$@H‰\$@H‹ 1íH9ét[H‹T$@H‰L$pH‰T$xHÇ„$èHÇ„$ðHÇ„$øÿÿÿÿH‰L$PH‰Œ$H‰T$XH‰”$èHÄÈÃHH‰$HH‰\$HH‰\$èH‹L$ésÿÿÿ‰%é6ÿÿÿA‰éÓýÿÿ‰éýÿÿèHÄÈÉ%é.ýÿÿ‰%éýÿÿ( +00runtime.morestack_noctxtü*sync.(*RWMutex).RLockÂ4sync.(*RWMutex).RUnlock·fÒ"runtime.deferprocØÈ runtime.duffcopy¢È runtime.duffcopy´ runtime.eqstring‚&runtime.deferreturnÐ2go.string."No such image"¤ .type.errors.errorString¶ "runtime.newobjectš +4runtime.writebarrierstring¼ +Bgo.itab.*errors.errorString.errorð &runtime.deferreturnŽ 0type.*errors.errorString¤ type.error¼ Bgo.itab.*errors.errorString.errorÐ  runtime.typ2Itab¦ 
&runtime.deferreturn€$"".autotmp_0497¯type.error"".autotmp_04960type.*errors.errorString"".autotmp_0495type.string"".autotmp_0494ŸXtype.github.com/fsouza/go-dockerclient.Image"".autotmp_0493ÿZtype.*github.com/fsouza/go-dockerclient.Image"".autotmp_0492¯type.int"".autotmp_0491Ÿtype.int"".autotmp_04900type.*errors.errorString"".autotmp_0489ï\type.[]github.com/fsouza/go-dockerclient.Image "".~r0ïtype.errorerrors.text·2Ïtype.string"".image¿Xtype.github.com/fsouza/go-dockerclient.Image"".i¿type.int "".~r3`type.error "".~r2Ptype.int "".~r10type.string +"".idtype.string"".s*type.*"".DockerServerF%ÕöZ!€4°a!>•eS + (}œf[2j1*/Tgclocals·c958acb0df1ea67178a15bee7623bbbdTgclocals·52c6e5e411ef106b9194437a527f5a0fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).createContainer M†MeH‹ %H„$ÐûÿÿH;AwèëâHì°HH‰$èH‹\$H‰œ$¸H‹œ$ÐHƒû„èH‹S@H‹kHH‰¬$ˆH‰,$H‰”$€Hƒú„»HZ SjèYYH…À…–HH‰$H‹¼$ÐHƒÿ„rHo@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$°H‰Œ$àH‰„$¸H‰„$èHH‰$èH‹|$H‰ùHƒÿ„ +1ÀèH‰Œ$ H‰ $Hƒ<$„àH‹œ$àH‰\$H‹œ$èH‰\$èH‹”$ H‹Œ$¸H‰$H‰ÊH H‰Œ$pH‰L$H‰”$xH‰T$èH‹L$H‹T$ H‰”$HHƒùH‰Œ$@tcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ÀH‰$H‹œ$ÈH‰\$H‰”$ðH‰T$H‰Œ$øH‰L$HÇD$ èèHÄ°ÃH‹œ$ÐH‹kH‰,$èH‹D$HH‹H‰”$ H‹KH‰Œ$(HÇ„$ÀHÇ„$È1íH9è…ï1Ò1ÉH‰”$ÀH‰”$H‰Œ$ÈH‰Œ$HƒùtlH‹H‰$H‰T$H‰L$è¶\$€ûuHH‹œ$ÀH‰$H‹´$ÈH‰t$H5Hl$H‰ïH¥H¥HÇD$ ôèèHÄ°ÃH‹œ$¸H‰$H‹´$¸H‹>Hƒÿ„1H·ÐH|$H¥H¥èH‹L$(H‹\$0H‰œ$XHƒùH‰Œ$PtcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ÀH‰$H‹œ$ÈH‰\$H‰”$ðH‰T$H‰Œ$øH‰L$HÇD$ ”èèHÄ°ÃHH‰$HÇD$èH‹\$H‰\$hH‹œ$¸H‹H‹kxH¼$Ø1ÀèHH‰$H‰l$Hœ$ØH‰\$èH‹´$¸H‹œ$Ø1íH9ë„pH‹œ$ØHƒû„H‹H‹kH‰”$ðH‰”$àH‰¬$øH‰¬$èèH‹$H‰ÃHÁø?H%ÿÿHÃHãÿÿH)ÃH‰$èH‹\$H‰œ$ðH‹\$H‰œ$øHH‰$èH‹|$Hƒÿ„o H-H‰øH‰îèHƒø„L HÇÂHÇÁH‰„$¨H‰”$°H‰Œ$¸H‹œ$¨H‰$Hƒ$H‹œ$ðH‰\$H‹œ$øH‰\$èHH‰$H‹\$hH‰\$Hœ$àH‰\$Hœ$¨H‰\$èHœ$ØH‰$èH‹´$¸H‹œ$Ø1íH9ë…þÿÿHÇ„$HÇ„$HÇ„$ÀHÇ„$ÈHÇ„$ÐH‹.H‹¨Hƒû…n H‹.Hƒý„Y H‹ H‹…¨H‹°H‰œ$(H‰Œ$HƒøH‰„$ † H‹)H‰¬$H‹iH‰¬$H‹œ$¸H‰$èH‹L$H‹D$H‰„$8Hƒø ‚Ð +H‹œ$¸H‹+H‰,$Hƒ<$„ª +H‰Œ$0HÇÀ H‰Œ$ðH‰L$H‰„$øH‰D$èHH‰$èH‹\$H‰œ$°èH‹$H‰œ$‹\$‰œ$˜H‹\$H‰œ$ èH‹$H‰\$@èH‹$H‰œ$x‹\$‰œ$€H‹\$H‰œ$ˆèH‹4$H‰óI¸Ï÷Sã¥›Ä 
H‰ðI÷èH‰ÕHÁýHÁû?H)ÝH‰ëHiÛúH‰õH)ÝH‰ëHƒÃH‰\$HHœ$ÐHÇHÇCHœ$ÐHƒû„‰ HÇÆHÇÅH‰œ$HH‰´$PH‰¬$XHH‰$H\$HH‰\$èH‹D$H‹l$H‹œ$HH‰$H‰„$pH‰D$H‰¬$xH‰l$èHH,$H‰ïH‰ÞH¥H¥H‹œ$HH‰\$H‹œ$PH‰\$H‹œ$XH‰\$ èL‹T$@L‹Œ$¸H‹\$(H‰œ$ðH‹t$0H‰´$øH5H¬$(H‰ïèH‹œ$H‰œ$HH‹œ$H‰œ$PH‹œ$0H‰œ$(H‹œ$8H‰œ$0H‹œ$H‰œ$8‹œ$˜‰œ$@H‹œ$ H‰œ$HH‹œ$H‰œ$PH‹œ$H‰œ$XH‹œ$ÀH‰œ$`H‹œ$ÈH‰œ$hH‹œ$ÐH‰œ$pI‹H‰œ$xI‹YH‰œ$xL‰ÓI¸ áÑÆkñ)L‰ÐI÷èH‰ÕHÁý HÁû?H)ÝH‰ëHiÛPÃL‰ÕH)ÝH‰¬$ˆH‹œ$xH‰œ$¨‹œ$€‰œ$°H‹´$ˆH‰´$¸I‹9Hƒÿ„EH·ÐH¼$ØH¥H¥HH‰$èH‹|$H‰ùHƒÿ„ 1ÀèH‰Œ$˜H‰ $Hƒ<$„ßH‹œ$ðH‰\$H‹œ$øH‰\$èH‹„$˜HÇ@Hh(HH‰ïH‰ÞH¥H¥Hh8HH‰ïH‰ÞH¥H¥H‰$Hƒ<$„kHƒ$PH‹\$hH‰\$èH‹œ$˜H‰œ$ðHH‰$H‹œ$°H‰\$Hœ$(H‰\$èH‹œ$¸H‰$Hƒ<$„úHƒ$HèH‹¼$°H‹Ÿ(Hƒû„ÈH‹œ$¸Hƒû„¾H‹ H‹CH‹kH‰¬$p1ÒH‰„$hH‰D$PH‰Œ$`H‰Œ$H‹l$PH9êvH‹œ$H‹+H‰¬$ˆH‰T$@H‹œ$ˆH‰\$xH‹\$xHƒû„CH‹³ H‰´$ðH‹‹(H‹¯ H‰¬$ H‹¯(H‰¬$¨H‰Œ$øH‹¬$¨H9é…ÐH‰4$H‰L$H‹¬$ H‰l$H‹¬$¨H‰l$èH‹¼$°H‹T$@¶\$ €û„H‹œ$¸H‰$Hƒ<$tqHƒ$HH QjèYYH…ÀuHH‹œ$ÀH‰$H‹´$ÈH‰t$H5Hl$H‰ïH¥H¥HÇD$ ™èèHÄ°ÃèHÄ°É%ë†H‹œ$HƒÃH‰œ$HÿÂH‹l$PH9ꌊþÿÿH‰¼$ˆH‹œ$¸Hƒû„àH‹H‹KH‹kH‰”$`H‰”$0H‰Œ$hH‰Œ$8H‰¬$pH‰¬$@H‹œ$@H‹¬$8H)ëHƒû}gHH‰$H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$HÇD$ èH‹\$(H‰œ$0H‹\$0H‰œ$8H‹\$8H‰œ$@H‹”$8H‰ÐHÿÂH‹Œ$@H‹¬$0H‰¬$0H‰”$8H‰Œ$@H‹œ$0HÃH‰$H‹œ$ˆH‰\$èH‹”$0H‹Œ$8H‹„$@H‹œ$¸H‰$Hƒ<$„’H‰”$`H‰T$H‰Œ$hH‰L$H‰„$pH‰D$èH‹œ$¸H‰$Hƒ<$„CHƒ$HèHÇD$ÉH‹œ$ÈH‰$H‹œ$ÀH‹[0ÿÓH‹œ$¸H‰\$`H‹œ$°H‰œ$ˆH‹œ$ˆH‰\$pH‹l$`H‹81íH9ët;H‹\$pH‰œ$¨HH‰$H‹\$`H‹«8H‰l$Hœ$¨H‰\$èHœ$`HÇHÇCH‹¼$°H/H¼$`H‰îH¥H¥HH‰$H‹œ$ÀH‰\$H‹œ$ÈH‰\$èH‹\$H‰œ$H‹\$ H‰œ$˜H‹œ$H‰œ$ÐH‹œ$˜H‰œ$ØHÇD$XHH‰$èH‹\$H‰œ$€H‹¼$€Hƒÿ„Ê1ÀèH‹œ$€H‰$Hƒ<$„ H‹œ$ÐH‰\$H‹œ$ØH‰\$èH‹œ$€H‰œ$€H‹´$€H‰t$XH´$`H¬$ÀH‰ïH¥H¥HH‰$Hœ$ÀH‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$XH‰$èèHÄ°É%éTÿÿÿ‰é/ÿÿÿ‰%é±ýÿÿ‰%ébýÿÿ‰éüÿÿ‰é¶úÿÿ‰é;úÿÿ‰%éúùÿÿ‰%é‰ùÿÿ‰%éùÿÿ‰éðøÿÿ‰é´øÿÿ‰épöÿÿ‰%éJõÿÿè è ‰EéŸôÿÿH‹.H‹¨HƒûŽÙôÿÿH‹.Hƒý„¯H‹ H‹…¨H‹°H‰œ$(H‰Œ$HƒøH‰„$ vuH‹)H‰¬$H‹iH‰¬$H‹.H‹•°H‹.H‹¨Hƒùr=H‹H‹ƒ H‰ÍHÿÍH‰ÑHÿÉHƒùtHƒÀH‰„$ÀH‰¬$ÈH‰Œ$Ðé+ôÿÿè è ‰EéIÿÿÿ‰é­òÿÿ‰éŠòÿÿ‰é÷ñÿÿ‰éÈðÿÿHÇ„$HÇ„$HÇ„$HH‰$H‰D$H‰”$ðH‰T$H‰Œ$øH‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$H‹PH‰”$H‹hH‰¬$€ûtHƒútHƒúv H‹H‹Nérïÿÿè 1Ò1Éébïÿÿ‰ë¬‰%éîÿÿ‰éïíÿÿ‰é‡íÿÿèHÄ°Éé>íÿÿ‰éíÿÿÈ +00runtime.morestack_noctxtPætype.struct { 
*github.com/fsouza/go-dockerclient.Config; HostConfig *github.com/fsouza/go-dockerclient.HostConfig }b"runtime.newobject„"runtime.deferproc¨type.io.Readerþruntime.convI2Ià4type.encoding/json.Decoderò"runtime.newobject¤Ä runtime.duffzero2runtime.writebarrierifaceÌètype.*struct { *github.com/fsouza/go-dockerclient.Config; HostConfig *github.com/fsouza/go-dockerclient.HostConfig }Š>encoding/json.(*Decoder).Decodeâ +ônet/http.Error€&runtime.deferreturnº(net/url.(*URL).QueryÒ go.string."name"¨ +"".nameRegexpÎ +8regexp.(*Regexp).MatchString¢ Dgo.string."Invalid container name"Ö net/http.Errorâ &runtime.deferreturnÞ 8"".(*DockerServer).findImage¶ +Ènet/http.ErrorÔ&runtime.deferreturnò¾type.map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding–runtime.makemapæØ runtime.duffzeroôrtype.map[github.com/fsouza/go-dockerclient.Port]struct {}ª&runtime.mapiterinitÜmath/rand.Intªstrconv.Itoaìjtype.[1]github.com/fsouza/go-dockerclient.PortBindingþ"runtime.newobjectª""".statictmp_0534À runtime.duffcopy€4runtime.writebarrierstringŽ¾type.map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingè$runtime.mapassign1Š&runtime.mapiternext¸:"".(*DockerServer).generateID€4runtime.writebarrierstringŽ`type.github.com/fsouza/go-dockerclient.Container "runtime.newobjectÄtime.Now–math/rand.Int²time.Now„math/rand.Int²type.intØruntime.convT2E 2runtime.writebarrierifaceÐ 0go.string."172.16.42.%d"Ä!fmt.Sprintf """".statictmp_0542À"¤ runtime.duffcopy†(ltype.github.com/fsouza/go-dockerclient.NetworkSettings˜("runtime.newobjectÊ(„ runtime.duffzero¶)4runtime.writebarrierstringì).go.string."172.16.42.1"–*&go.string."docker0"ð*.runtime.writebarrierptrž+`type.github.com/fsouza/go-dockerclient.Containerä+.runtime.writebarrierfat¦,(sync.(*RWMutex).LockÎ0 runtime.eqstringÂ12sync.(*RWMutex).Unlock·fÒ1"runtime.deferproc 2lgo.string."there's already a container with this 
name"Ô2net/http.Errorà2&runtime.deferreturnü2&runtime.deferreturnÒ5ftype.[]*github.com/fsouza/go-dockerclient.ContainerÄ6"runtime.growsliceÂ8.runtime.writebarrierptrø92runtime.writebarriersliceº:,sync.(*RWMutex).Unlock„; +¢<ptype.chan<- *github.com/fsouza/go-dockerclient.Containerð<"runtime.chansend1à=type.io.Writer¦>runtime.convI2Iº?4type.encoding/json.EncoderÌ?"runtime.newobject˜@ð runtime.duffzero„A2runtime.writebarrierifaceúA2type.struct { ID string }¦Bruntime.convT2EêB>encoding/json.(*Encoder).EncodeöB&runtime.deferreturnšE$runtime.panicslice¨E$runtime.panicindexÊH$runtime.panicsliceØH$runtime.panicindexúI&type.net/url.ValuesÊJ4runtime.mapaccess2_faststrôK$runtime.panicindexÒL&runtime.deferreturn@àŽ"".autotmp_0560ß6type.*encoding/json.Encoder"".autotmp_05596type.*encoding/json.Encoder"".autotmp_0558¿ type.io.Writer"".autotmp_0557btype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0556type.uint64"".autotmp_0555type.uint64"".autotmp_0554type.int"".autotmp_0553type.int"".autotmp_0552ÿ ftype.[]*github.com/fsouza/go-dockerclient.Container"".autotmp_0551ftype.[]*github.com/fsouza/go-dockerclient.Container"".autotmp_0550btype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0549Ÿ type.string"".autotmp_0548type.string"".autotmp_0547Ïbtype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0546¿dtype.**github.com/fsouza/go-dockerclient.Container"".autotmp_0545type.int"".autotmp_0544type.int"".autotmp_0543¯ntype.*github.com/fsouza/go-dockerclient.NetworkSettings"".autotmp_0541"type.interface {}"".autotmp_0539Ï &type.[]interface {}"".autotmp_0538type.uint64"".autotmp_0531Ÿ6type.*encoding/json.Decoder"".autotmp_05306type.*encoding/json.Decoder"".autotmp_0529ÿ type.io.Reader"".autotmp_0528ß 2type.struct { ID string }"".autotmp_0527btype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0526ftype.[]*github.com/fsouza/go-dockerclient.Container"".autotmp_0525Ÿ 
ftype.[]*github.com/fsouza/go-dockerclient.Container"".autotmp_0524`type.github.com/fsouza/go-dockerclient.Container"".autotmp_0523type.string"".autotmp_0522Ïtype.int"".autotmp_0521type.int"".autotmp_0520¿ (type.[1]interface {}"".autotmp_0519ïtype.time.Time"".autotmp_0518type.int"".autotmp_0517¿type.time.Time"".autotmp_0516type.string"".autotmp_0515type.int"".autotmp_0514type.int"".autotmp_0513htype.[]github.com/fsouza/go-dockerclient.PortBinding"".autotmp_0512type.string"".autotmp_0511type.int"".autotmp_0510Ÿ Vtype.github.com/fsouza/go-dockerclient.Port"".autotmp_0509¯|type.map.iter[github.com/fsouza/go-dockerclient.Port]struct {}"".autotmp_0507type.string"".autotmp_0505¿type.int"".autotmp_0504type.string"".autotmp_0502ÿ +type.string"".&containerÿbtype.*github.com/fsouza/go-dockerclient.Container"".&configïètype.*struct { *github.com/fsouza/go-dockerclient.Config; HostConfig *github.com/fsouza/go-dockerclient.HostConfig } "".~r0¯6type.*encoding/json.Encoder$encoding/json.w·2¿type.io.Writer"".containerÿbtype.*github.com/fsouza/go-dockerclient.Container"".sŸ*type.*"".DockerServer "".~r0ßtype.stringnet/url.vs·4ß +type.[]stringnet/url.key·3Ÿtype.string$encoding/json.r·2Ÿtype.io.Reader"".cŸ 2type.struct { ID string }"".cïbtype.*github.com/fsouza/go-dockerclient.Container"".generatedIDÿ type.string"".argsßtype.[]string"".pathßtype.string"".portÿVtype.github.com/fsouza/go-dockerclient.Port"".ports¾type.map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding "".err¿ type.error"".name¿type.string "".errß type.error"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerŒ%àZƒßàðßà¸ßà°Lßà ßàüßàíßàÐ&ºÆ%OŠUw2:VU#t FÅ,$[ Z.%))Ìó(¼':.)wŸ.:%ˆ!#x1Ì   ' ! 
WR  · )Ä0QwO=-kŠDD-oJYQE×d")7ªvªO]:!”B)¹,[¼[S\Q"9ëUWTgclocals·d1e6514bc516778716e9d38209cf4ab8Tgclocals·37832594314d999d6d2b5ee19691d36fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).generateIDàÌeH‹ %HD$àH;AwèëåHì HÇ„$°HÇ„$¸HH‰$èH‹l$H‰l$HHƒý„@HÇÂHÇÁH‰l$pH‰,$H‰T$xH‰T$H‰Œ$€H‰L$èH‹t$HHl$8H‰ïH¥H¥H\$`HÇHÇCH\$`Hƒû„ÒHÇÁHÇÂH‰œ$ˆH‰Œ$H‰”$˜HH‰$H\$8H‰\$èH‹L$H‹D$H‹œ$ˆH‰$H‰L$PH‰L$H‰D$XH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$ˆH‰\$H‹œ$H‰\$H‹œ$˜H‰\$ èH‹L$(H‹D$0H‰Œ$°H‰„$¸HĠÉé'ÿÿÿ‰Eé¸þÿÿ +*0runtime.morestack_noctxtztype.[16]uint8Œ"runtime.newobjectš crypto/rand.ReadÜtype.[16]uint8‚runtime.convT2Eà2runtime.writebarrierifaceîgo.string."%x"âfmt.Sprintf0À"".autotmp_0587Ÿ"type.interface {}"".autotmp_0585/&type.[]interface {}"".autotmp_0582Ïtype.[16]uint8"".autotmp_0581(type.[1]interface {}"".&buf¯type.*[16]uint8 "".~r0type.string"".s*type.*"".DockerServer"Àô¿À°è:BEGtp@Tgclocals·1ee14e32cec51f1cde6c2b0577d81887Tgclocals·80320eec1018401d2b0daec3b250b99eü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).renameContaineràÆeH‹ %HD$ H;AwèëåHìàH‹œ$H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰´$ H‰t$H‰¬$¨H‰l$èH‹\$ Hƒû„H‹H‹kH‹œ$èH‰$H‰T$pH‰T$H‰l$xH‰l$èH‹\$H‰\$@H‹\$ H‰\$8H‹L$(H‹\$0H‰œ$ˆHƒùH‰Œ$€tcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ðH‰$H‹œ$øH‰\$H‰”$ H‰T$H‰Œ$¨H‰L$HÇD$ ”èèHÄàÃHH‰$èH‹D$HH‰$H‰D$HH‰D$H‹\$@H‰\$Hƒ|$„ èH‹œ$H‹kH‰,$èH‹D$HH‹H‰T$`H‹KH‰L$hHÇD$PHÇD$X1íH9è…þ1Ò1ÉH‹\$HH‰$H$ H‰T$PH‰T$H‰L$XH‰L$èH‹œ$èH‰$Hƒ<$„­Hƒ$HèH‹œ$èH‰$Hƒ<$„€Hƒ$HH QjèYYH…À…SH‹œ$èHƒû„:H‹ H‹CH‹kH‰¬$ØH‰Œ$ÈH‹l$8H‰„$ÐH9ŃHéH‹+Hƒý„éH‹uH‰´$ H‹MH‹\$HH‹H‰”$H‹CH‰Œ$¨H‰„$˜H9ÁutH‰4$H‰L$H‰T$H‰D$è¶\$ €ûtRH‹œ$èHƒût|H‹ H‹CH‹kH‰¬$ØH‰Œ$ÈH‹l$8H‰„$ÐH9ÅsHHéH‰$H‹\$HH‰\$èHÇD$ÌH‹œ$øH‰$H‹œ$ðH‹[0ÿÓèHÄàÃè ‰뀉Eéÿÿÿè ‰é¿þÿÿèHÄàÉ%étþÿÿ‰%éGþÿÿHÇ„$°HÇ„$¸HÇ„$ÀHH‰$H‰D$H‰”$ H‰T$H‰Œ$¨H‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$°H‹PH‰”$¸H‹hH‰¬$À€ûtHƒútHƒúv H‹H‹Nécýÿÿè 1Ò1ÉéSýÿÿ‰ë¬‰%éëüÿÿ‰éáûÿÿ< 
+*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ@"".(*DockerServer).findContainerØ +ênet/http.Errorö&runtime.deferreturn”`type.github.com/fsouza/go-dockerclient.Container¦"runtime.newobject¾`type.github.com/fsouza/go-dockerclient.Container.runtime.writebarrierfatº(net/url.(*URL).QueryÒ go.string."name"Š4runtime.writebarrierstringÌ(sync.(*RWMutex).Lock’ 2sync.(*RWMutex).Unlock·f¢ "runtime.deferproc‚  runtime.eqstringº .runtime.writebarrierptr„ +Œ&runtime.deferreturn¦$runtime.panicindexÌ$runtime.panicindexê&runtime.deferreturn€&type.net/url.ValuesÐ4runtime.mapaccess2_faststrú$runtime.panicindex@À""".autotmp_0604type.string"".autotmp_0603type.string"".autotmp_0600type.string"".autotmp_0598type.string"".autotmp_0597Ÿtype.string"".autotmp_0596type.string"".©¯btype.*github.com/fsouza/go-dockerclient.Container "".~r0Ÿtype.stringnet/url.vs·4_type.[]stringnet/url.key·3ÿtype.string "".err¿type.error"".indexÏtype.int"".container¿btype.*github.com/fsouza/go-dockerclient.Container +"".idßtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerH"À¤¿À‡º¿À.¿Àï° \ô"iKUE}!6¯R#  · $.¤5ÕËU4Tgclocals·bc335ce91c3a8b5f426dd201465802bdTgclocals·5699c890da9a4c1a61d89d978591d077ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþF"".(*DockerServer).inspectContainerÀ +¢ +eH‹ %HD$ØH;AwèëåHì¨H‹œ$ÈH‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$˜H‰T$H‰¬$ H‰l$èH‹\$ Hƒû„H‹ H‹kH‹œ$°H‰$H‰L$XH‰L$H‰l$`H‰l$èH‹\$H‰\$8H‹D$(H‹\$0H‰\$pHƒøH‰D$ht]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$¸H‰$H‹œ$ÀH‰\$H‰Œ$˜H‰L$H‰„$ H‰D$HÇD$ ”èHĨÃH‹œ$ÀH‰$H‹œ$¸H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$ÀH‰$H‹œ$¸H‹[0ÿÓHH‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹L$H‹D$ H‰Œ$ˆH‰L$HH‰„$H‰D$PHH‰$èH‹L$H‰ÏHƒùtu1ÀèH‰L$@H‰ $Hƒ<$tUH‹\$HH‰\$H‹\$PH‰\$èH‹L$@H‹D$8H‰ $H‰ÁHH‰D$xH‰D$H‰Œ$€H‰L$èHĨÉ%뢉뇉éóýÿÿ, 
+*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ@"".(*DockerServer).findContainer¸ +Ênet/http.Error’ +®0go.string."Content-Type"Ô8go.string."application/json"ü&net/http.Header.SetÆ +Ðtype.io.Writer–runtime.convI2Iì4type.encoding/json.Encoderþ"runtime.newobject¨ð runtime.duffzeroú2runtime.writebarrierifaceª btype.*github.com/fsouza/go-dockerclient.Containerâ >encoding/json.(*Encoder).Encode@Ð"".autotmp_0615Ï6type.*encoding/json.Encoder"".autotmp_06146type.*encoding/json.Encoder"".autotmp_0613?type.io.Writer"".autotmp_0610type.string"".autotmp_0608type.string$encoding/json.w·2¿type.io.Writer "".errtype.error"".containerßbtype.*github.com/fsouza/go-dockerclient.Container +"".idŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer("ÐŽÏÐËÏÐ# 6–"i> UQ#Ð .›µ>40Tgclocals·cec9627e2837f98af62e9c7580b3baccTgclocals·04f43ee17c64d5db43a23c286d1bf236ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).statsContainer žeH‹ %H„$ÈôÿÿH;AwèëâHì¸ H‹œ$Ø H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$PH‰T$H‰¬$XH‰l$èH‹\$ Hƒû„H‹ H‹kH‹œ$À H‰$H‰Œ$ H‰L$H‰¬$(H‰l$èH‹D$(H‹\$0H‰œ$8HƒøH‰„$0t]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$È H‰$H‹œ$Ð H‰\$H‰Œ$PH‰L$H‰„$XH‰D$HÇD$ ”èHĸ ÃH‹œ$Ø H‹kH‰,$èH‹D$HH‹H‰”$H‹KH‰Œ$HÇ„$ðHÇ„$ø1íH9è…À1Ò1ÀH‰”$ðH‰$H‰„$øH‰D$è¶\$ˆœ$×H‹”$ H‹„$(HH‰$H‹œ$À H‹«H‰l$H‰”$PH‰T$H‰„$XH‰D$èH‹\$ H‹+H‰¬$àH‹œ$Ð H‰$H‹œ$È H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$Ð H‰$H‹œ$È H‹[0ÿÓHH‰$H‹œ$È H‰\$H‹œ$Ð H‰\$èH‹T$H‹D$ H‰”$@H‰”$H‰„$HH‰„$HH‰$èH‹|$H‰úHƒÿ„C1ÀèH‰”$èH‰$Hƒ<$„H‹œ$H‰\$H‹œ$H‰\$èH‹”$àH‹œ$èH‰œ$ØH¼$x1Àè1íH9êtQH‹œ$ H‰$H‹œ$(H‰\$H‹ÿÓH\$H¬$øH‰ïH‰ÞèHœ$øH¬$xH‰ïH‰ÞèHœ$xH¬$8H‰ïH‰ÞèHH‰$Hœ$8H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹œ$ØH‰$èH‹”$à€¼$×…&ÿÿÿHĸ É%éÛþÿÿ‰é¶þÿÿHÇ„$`HÇ„$hHÇ„$pHH‰$H‰D$H‰”$PH‰T$H‰Œ$XH‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$`H‹PH‰”$hH‹hH‰¬$p€ûtHƒútHƒúv H‹H‹Fé¡üÿÿè 1Ò1Àé‘üÿÿ‰ë¬‰éxûÿÿH 
+00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars|go.string."id"˜,type.map[string]stringè4runtime.mapaccess1_faststrê@"".(*DockerServer).findContainer +Ônet/http.ErrorŽ(net/url.(*URL).Query¦$go.string."stream"Þ"strconv.ParseBool¤ˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.Stats’4runtime.mapaccess1_faststrê +† 0go.string."Content-Type"¬ 8go.string."application/json"Ô &net/http.Header.Setž + +¨ +type.io.Writerî +runtime.convI2IÐ 4type.encoding/json.Encoderâ "runtime.newobject” ð runtime.duffzero€ 2runtime.writebarrierifaceÎ   runtime.duffzeroœ +Èà runtime.duffcopyþà runtime.duffcopy´à runtime.duffcopyÂXtype.github.com/fsouza/go-dockerclient.Statsîruntime.convT2E¸>encoding/json.(*Encoder).Encodeð&type.net/url.ValuesÀ4runtime.mapaccess2_faststrê$runtime.panicindex@ð.,"".autotmp_0633Ÿ#6type.*encoding/json.Encoder"".autotmp_06326type.*encoding/json.Encoder"".autotmp_0631ï!type.io.Writer"".autotmp_0629ÿXtype.github.com/fsouza/go-dockerclient.Stats"".autotmp_0628ÿ +Xtype.github.com/fsouza/go-dockerclient.Stats"".autotmp_0625type.string"".autotmp_0623type.string"".autotmp_0621type.string"".autotmp_0619Ï!type.string$encoding/json.w·2ï"type.io.Writer "".~r0#type.stringnet/url.vs·4¯!type.[]stringnet/url.key·3Ï"type.string"".statsÿ Xtype.github.com/fsouza/go-dockerclient.Stats"".encoder¿#6type.*encoding/json.Encoder"".callback¯#rtype.func(string) github.com/fsouza/go-dockerclient.Stats"".streamÁ#type.bool "".err"type.error +"".id¯"type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer*%ð.ï.ð.‡ï.ð.Ò +Z®%i=U‰^Q#ËQe· :1ƒ-eï»OOh© UTgclocals·7a383875e23784cb158d762414ce6278Tgclocals·bdc1cfaf863af97c7b8d007001384e8aü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ>"".(*DockerServer).topContainerÀ²eH‹ %H„$ðþÿÿH;AwèëâHìH‹œ$°H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$ØH‰T$H‰¬$àH‰l$èH‹\$ Hƒû„KH‹ 
H‹kH‹œ$˜H‰$H‰L$xH‰L$H‰¬$€H‰l$èH‹´$ H‹”$¨H‹|$H‹D$(H‹\$0H‰œ$HƒøH‰„$ˆt]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$ H‰$H‹œ$¨H‰\$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ ”èHÄÃH‰|$X¶_X€û…_HÇD$ôH‰$H‹^0ÿÓH‹\$xH‰œ$¸H‹œ$€H‰œ$ÀHœ$ÈHÇHÇCHœ$ÈHƒû„ÿHÇÂHÇÁH‰œ$H‰”$ H‰Œ$(HH‰$Hœ$¸H‰\$èH‹L$H‹D$H‹œ$H‰$H‰Œ$¨H‰L$H‰„$°H‰D$èHH‰$H‹œ$ H‰\$H‹œ$¨H‰\$èH\$H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥H‹œ$H‰\$ H‹œ$ H‰\$(H‹œ$(H‰\$0èHÄÉéúþÿÿH‰$H‹^ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$¨H‰$H‹œ$ H‹[0ÿÓH‹|$XHƒÿ„ŸHo8H<$H‰îH¥H¥H¥HHl$H‰ïH‰ÞH¥H¥èH‹\$(H‰œ$ØH‹\$0H‰œ$àH¼$01ÀèHH‰$èH‹|$Hƒÿ„$H-H‰øH‰îèHƒø„HÇÂHÇÅH‰„$H‰„$0H‰”$H‰”$8H‰¬$H‰¬$@HH‰$èH‹\$Hƒû„HÇÅHÇÂH‰œ$èH‰¬$ðH‰”$øHH‰$èH‹|$Hƒÿ„QH-H‰øH‰îèHƒø„.HÇÆHÇÂH‰„$H‰´$H‰”$H‹|$XHƒÿ„òHw(H<$H¥H¥HHl$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$ H‹œ$àH‰\$(èH\$0Hl$H‰ïH‰ÞH¥H¥H‹œ$HƒÃpH‰$èH‹œ$èH‰$H‹œ$H‰\$H‹œ$H‰\$H‹œ$H‰\$èH‹œ$èH‰œ$HH‹œ$ðH‰œ$PH‹œ$øH‰œ$XHH‰$H‹œ$ H‰\$H‹œ$¨H‰\$èH‹T$H‹D$ H‰”$˜H‰T$hH‰„$ H‰D$pHH‰$èH‹|$H‰úHƒÿ„©1ÀèH‰T$`H‰$Hƒ<$„‚H‹\$hH‰\$H‹\$pH‰\$èH‹t$`H‰t$PH´$0H¬$`H‰ïèHH‰$Hœ$`H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$PH‰$èHÄÉ%érÿÿÿ‰éPÿÿÿ‰éþÿÿ‰éËýÿÿ‰é¨ýÿÿ‰é\ýÿÿ‰éøüÿÿ‰éÕüÿÿ‰éZüÿÿ‰é®ùÿÿb +00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars|go.string."id"˜,type.map[string]stringè4runtime.mapaccess1_faststrä@"".(*DockerServer).findContaineræ +ønet/http.ErrorÖ +¸type.stringäruntime.convT2EÎ2runtime.writebarrierifaceÜtype.io.Writer¢ runtime.convI2IÖ Ngo.string."Container %s is not running"Ì +fmt.Fprintf‚ +ž 0go.string."Content-Type"Ä 8go.string."application/json"ì &net/http.Header.Set¶ +€ go.string." "¨ strings.Joinú è runtime.duffzeroˆtype.[8]stringš"runtime.newobjectÆ""".statictmp_0647ÜÀ runtime.duffcopyú type.[1][]stringŒ"runtime.newobject„type.[8]string–"runtime.newobjectÂ""".statictmp_0652ØÀ runtime.duffcopyügo.string." 
"Ø*runtime.concatstring3ª4runtime.writebarrierstringš2runtime.writebarriersliceˆtype.io.WriterÎruntime.convI2I¤4type.encoding/json.Encoder¶"runtime.newobjectèð runtime.duffzeroÂ2runtime.writebarrieriface†Ø runtime.duffcopy”`type.github.com/fsouza/go-dockerclient.TopResultÀruntime.convT2E„>encoding/json.(*Encoder).Encode@ 0"".autotmp_0656ß6type.*encoding/json.Encoder"".autotmp_06556type.*encoding/json.Encoder"".autotmp_0654ïtype.io.Writer"".autotmp_0653type.*[8]string"".autotmp_0651type.[]string"".autotmp_0649Ïtype.[][]string"".autotmp_0646Ÿtype.[]string"".autotmp_0645Ï"type.interface {}"".autotmp_0643ï&type.[]interface {}"".autotmp_0642_`type.github.com/fsouza/go-dockerclient.TopResult"".autotmp_0641type.string"".autotmp_0639¯type.string"".autotmp_0638(type.[1]interface {}"".autotmp_0637type.string"".autotmp_0635ïtype.string "".~r0ÿ6type.*encoding/json.Encoder$encoding/json.w·2Ïtype.io.Writer"".result¿`type.github.com/fsouza/go-dockerclient.TopResult "".errtype.error"".containerïbtype.*github.com/fsouza/go-dockerclient.Container +"".id¯type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer6% ¢Ÿ éŸ ÛŸ Rà ~Þ%iOU½A#UI0EÚ80 …R1€Bx†´Ë¾¡aZ4F?"_Tgclocals·a484a676faa0084ad5f98b43c17e101cTgclocals·950db48493931155c4d72d2be7776567ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).startContaineràÎeH‹ %HD$¸H;AwèëåHìÈH‹œ$èH‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰´$¸H‰t$H‰¬$ÀH‰l$èH‹\$ Hƒû„H‹H‹kH‹œ$ÐH‰$H‰T$hH‰T$H‰l$pH‰l$èH‹\$H‰\$8H‹L$(H‹\$0H‰œ$€HƒùH‰L$xtcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ØH‰$H‹œ$àH‰\$H‰”$¸H‰T$H‰Œ$ÀH‰L$HÇD$ ”èèHÄÈÃH‹œ$ÐH‰$Hƒ<$„CHƒ$HèH‹œ$ÐH‰$Hƒ<$„Hƒ$HH QjèYYH…À…éH‹œ$èHƒû„ÐH‹S@H‹kHH‰¬$ H‰,$H‰”$˜Hƒú„£HZ SjèYYH…À…~HH‰$èH‹\$H‰\$PHH‰$H‹¼$èHƒÿ„@Ho@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$¨H‰L$XH‰„$°H‰D$`HH‰$èH‹L$H‰ÏHƒù„Þ1ÀèH‰L$@H‰ $Hƒ<$„·H‹\$XH‰\$H‹\$`H‰\$èH‹T$@H‹L$PH‰$H‰ÊH H‰Œ$ˆH‰L$H‰”$H‰T$èH‹T$H‹L$ H‰Œ$€HƒúH‰T$xtcH‰ $H‹Z ÿÓH‹T$H‹L$H‹œ$ØH‰$H‹œ$àH‰\$H‰”$¸H‰T$H‰Œ$ÀH‰L$HÇD$ 
ôèèHÄÈÃH‹\$8H‰$Hƒ<$„ÇH$PH‹\$PH‰\$èH‹T$8¶ZX€ûtHH‹œ$ØH‰$H‹´$àH‰t$H5Hl$H‰ïH¥H¥HÇD$ èèHÄÈÃHÇÅ@ˆjXH‹Œ$ÐH‹™81íH9ët+H‰T$HHH‰$H‹©8H‰l$H\$HH‰\$èèHÄÈÉ%é-ÿÿÿ‰%é=þÿÿ‰éþÿÿ‰é¹ýÿÿèHÄÈÉéVýÿÿ‰é)ýÿÿèHÄÈÉ%éÞüÿÿ‰%é±üÿÿ‰éÝûÿÿH +*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ@"".(*DockerServer).findContainer¾ +Ðnet/http.ErrorÜ&runtime.deferreturn®(sync.(*RWMutex).Lockô2sync.(*RWMutex).Unlock·f„"runtime.deferproc¢"runtime.deferprocÆbtype.github.com/fsouza/go-dockerclient.HostConfigØ"runtime.newobjectútype.io.ReaderÐruntime.convI2I¦ 4type.encoding/json.Decoder¸ "runtime.newobjectê Ä runtime.duffzeroÄ +2runtime.writebarrierifaceô +dtype.*github.com/fsouza/go-dockerclient.HostConfig² >encoding/json.(*Decoder).Decode„ +– net/http.Error¢ &runtime.deferreturnˆ.runtime.writebarrierpträJgo.string."Container already running"˜net/http.Error¤&runtime.deferreturnŽptype.chan<- *github.com/fsouza/go-dockerclient.ContainerÌ"runtime.chansend1Ø&runtime.deferreturnÀ&runtime.deferreturnø&runtime.deferreturn@"".autotmp_06796type.*encoding/json.Decoder"".autotmp_06786type.*encoding/json.Decoder"".autotmp_0677?type.io.Reader"".autotmp_0676ÿbtype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0675type.string"".autotmp_0672type.string"".autotmp_0670type.string"".&hostConfigïdtype.*github.com/fsouza/go-dockerclient.HostConfig$encoding/json.r·2ßtype.io.Reader "".errŸtype.error"".containerŸbtype.*github.com/fsouza/go-dockerclient.Container +"".id¿type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerz"—EF…€Y3(° rŒ"iA U!6Oõ U0 : A  :.¨Ñ4F7*HZ + †Tgclocals·f3828558443ce662a87feff12c09632bTgclocals·9cd0f1c7734d56b3c926d71ae19f8ec3ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).stopContainerà  eH‹ 
%H;awèëêHƒìxH‹œ$˜H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰t$hH‰t$H‰l$pH‰l$èH‹\$ Hƒû„äH‹H‹kH‹œ$€H‰$H‰T$HH‰T$H‰l$PH‰l$èH‹\$H‰\$8H‹L$(H‹\$0H‰\$`HƒùH‰L$XtZH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ˆH‰$H‹œ$H‰\$H‰T$hH‰T$H‰L$pH‰L$HÇD$ ”èèHƒÄxÃH‹œ$€H‰$Hƒ<$„Hƒ$HèH‹œ$€H‰$Hƒ<$„êHƒ$HH QjèH‹´$˜H‹”$ YYH…À…°H‹l$8¶]X€ûu8H‰4$H‰T$HHl$H‰ïH‰ÞH¥H¥HÇD$ èèHƒÄxÃHÇD$ÌH‰$H‹^0ÿÓH‹T$81í@ˆjXH‹Œ$€H‹™81íH9ët+H‰T$@HH‰$H‹©8H‰l$H\$@H‰\$èèHƒÄxÃèHƒÄxÉ%é +ÿÿÿ‰%éÝþÿÿ‰éþÿÿ* + 0runtime.morestack_noctxtNŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsfgo.string."id"‚,type.map[string]stringÆ4runtime.mapaccess1_faststr¼@"".(*DockerServer).findContainerœ +¢net/http.Error®&runtime.deferreturnú(sync.(*RWMutex).LockÀ2sync.(*RWMutex).Unlock·fÐ"runtime.deferprocÂBgo.string."Container not running"ünet/http.Errorˆ&runtime.deferreturn¼ +’ptype.chan<- *github.com/fsouza/go-dockerclient.ContainerÐ"runtime.chansend1Ü&runtime.deferreturnò&runtime.deferreturn@ð"".autotmp_0687obtype.*github.com/fsouza/go-dockerclient.Container"".autotmp_0686type.string"".autotmp_0684type.string "".err?type.error"".containerbtype.*github.com/fsouza/go-dockerclient.Container +"".id_type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerNð…ïðENïðiïð +ïð.ðL¾c> O !F- A  "&–!IDTgclocals·bc335ce91c3a8b5f426dd201465802bdTgclocals·97d2741936c7bda613787afceb8adff3ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþB"".(*DockerServer).pauseContainerÀ°eH‹ %H;awèëêHƒìpH‹œ$H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰t$`H‰t$H‰l$hH‰l$èH‹\$ Hƒû„›H‹H‹kH‹\$xH‰$H‰T$@H‰T$H‰l$HH‰l$èH‹\$H‰\$8H‹L$(H‹\$0H‰\$XHƒùH‰L$PtZH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$€H‰$H‹œ$ˆH‰\$H‰T$`H‰T$H‰L$hH‰L$HÇD$ ”èèHƒÄpÃH‹\$xH‰$Hƒ<$„ÔHƒ$HèH‹\$xH‰$Hƒ<$„ªHƒ$HH QjèH‹´$H‹”$˜YYH…ÀutH‹l$8¶]Y€ût8H‰4$H‰T$HHl$H‰ïH‰ÞH¥H¥HÇD$ èèHƒÄpÃHÇD$ÌH‰$H‹^0ÿÓH‹\$8HÇÅ@ˆkYèHƒÄpÃèHƒÄpÉ%éJÿÿÿ‰%é ÿÿÿ‰é^þÿÿ& + 
0runtime.morestack_noctxtNŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsfgo.string."id"‚,type.map[string]stringÆ4runtime.mapaccess1_faststr¶@"".(*DockerServer).findContainer– +œnet/http.Error¨&runtime.deferreturnî(sync.(*RWMutex).Lock®2sync.(*RWMutex).Unlock·f¾"runtime.deferproc¨Hgo.string."Container already paused"ânet/http.Errorî&runtime.deferreturn¢ +Ê&runtime.deferreturnà&runtime.deferreturn@à"".autotmp_0691type.string"".autotmp_0689type.string "".err?type.error"".containerobtype.*github.com/fsouza/go-dockerclient.Container +"".id_type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerNà‚ßà?Jßà-ßà +ßà' Hâc; O ?-   &z!=Tgclocals·0b0af158856f2ab75a5e0667d877f9ebTgclocals·0a4b95df80c389fe7e338059324575e1ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþF"".(*DockerServer).unpauseContainerÀ¦eH‹ %H;awèëêHƒìpH‹œ$H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰t$`H‰t$H‰l$hH‰l$èH‹\$ Hƒû„–H‹H‹kH‹\$xH‰$H‰T$@H‰T$H‰l$HH‰l$èH‹\$H‰\$8H‹L$(H‹\$0H‰\$XHƒùH‰L$PtZH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$€H‰$H‹œ$ˆH‰\$H‰T$`H‰T$H‰L$hH‰L$HÇD$ ”èèHƒÄpÃH‹\$xH‰$Hƒ<$„ÏHƒ$HèH‹\$xH‰$Hƒ<$„¥Hƒ$HH QjèH‹´$H‹”$˜YYH…ÀuoH‹l$8¶]Y€ûu8H‰4$H‰T$HHl$H‰ïH‰ÞH¥H¥HÇD$ èèHƒÄpÃHÇD$ÌH‰$H‹^0ÿÓH‹\$81í@ˆkYèHƒÄpÃèHƒÄpÉ%éOÿÿÿ‰%é%ÿÿÿ‰écþÿÿ& + 0runtime.morestack_noctxtNŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsfgo.string."id"‚,type.map[string]stringÆ4runtime.mapaccess1_faststr¶@"".(*DockerServer).findContainer– +œnet/http.Error¨&runtime.deferreturnî(sync.(*RWMutex).Lock®2sync.(*RWMutex).Unlock·f¾"runtime.deferproc¨@go.string."Container not paused"ânet/http.Errorî&runtime.deferreturn¢ +À&runtime.deferreturnÖ&runtime.deferreturn@à"".autotmp_0695type.string"".autotmp_0693type.string "".err?type.error"".containerobtype.*github.com/fsouza/go-dockerclient.Container +"".id_type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerNà‚ßà?Jßà(ßà +ßà, H„ c; 
O ?-    &z! BTgclocals·0b0af158856f2ab75a5e0667d877f9ebTgclocals·0a4b95df80c389fe7e338059324575e1ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).attachContainer€*ê)eH‹ %H„$0ÿÿÿH;AwèëâHìPH‹œ$pH‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$H‰T$H‰¬$H‰l$èH‹\$ Hƒû„ç H‹ H‹kH‹œ$XH‰$H‰Œ$H‰L$H‰¬$˜H‰l$èH‹\$H‰\$hH‹D$(H‹\$0H‰œ$¸HƒøH‰„$°t]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$`H‰$H‹œ$hH‰\$H‰Œ$H‰L$H‰„$H‰D$HÇD$ ”èHÄPÃHH‰$H‹œ$`H‰\$H‹œ$hH‰\$èH‹”$`H‹Œ$hH‹\$H‰œ$ H‹\$ H‰œ$¨¶\$(€ûu5H‰$H‰L$HHl$H‰ïH‰ÞH¥H¥HÇD$ ôèHÄPÃH‰ $H‹Z ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$hH‰$H‹œ$`H‹[0ÿÓH‹œ$¨H‰$H‹œ$ H‹[ ÿÓH‹T$H‰”$ÀH‹L$H‰Œ$ÈH‹D$ H‹\$(H‰œ$¸HƒøH‰„$°t]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$`H‰$H‹œ$hH‰\$H‰Œ$H‰L$H‰„$H‰D$HÇD$ ôèHÄPÃHH‰$H‰T$H‰L$èH‹L$H‹D$ HHl$XH‰ïH‰ÞH¥H‰Œ$àH‰Œ$€H‰„$èH‰„$ˆH\$XHl$PH‰ïH‰ÞH¥HH‰$HÇD$HÇD$èH‹\$H‰œ$8H‹\$ H‰œ$@H‹\$(H‰œ$HHH‰$èH‹D$H‰D$xH‰$Hƒ<$„žH‹œ$€H‰\$H‹œ$ˆH‰\$èH‹D$xHƒø„iHhH\$PH‰ïH‰ÞH¥H‰$Hƒ<$„=Hƒ$H‹œ$8H‰\$H‹œ$@H‰\$H‹œ$HH‰\$èH‹D$xH‹l$h¶]X€û„vH‰D$`H‰D$xH‹H‰D$p1íH9è„Hœ$HÇHÇCHœ$Hƒû„îHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$H‹\$hH‰\$Hƒ|$„›èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹D$xH‹L$pH‰Œ$àH‰ $H‰„$èH‰D$HHl$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$ H‹œ$(H‰\$(H‹œ$0H‰\$0èH‹\$`H‰\$xHH‹+H‰¬$ðH‹kH‰¬$øH‹H‰D$p1íH9脆Hœ$HÇHÇCHœ$Hƒû„VHÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$Hœ$ðH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹D$xH‹L$pH‰Œ$àH‰ $H‰„$èH‰D$H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹\$`H‰\$xHH‹+H‰¬$ðH‹kH‰¬$øH‹H‰D$p1íH9è„Hœ$HÇHÇCHœ$Hƒû„éHÇÂHÇÁH‰œ$ H‰”$(H‰Œ$0HH‰$Hœ$ðH‰\$èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹D$xH‹L$pH‰Œ$àH‰ $H‰„$èH‰D$H‹œ$ H‰\$H‹œ$(H‰\$H‹œ$0H‰\$ èH‹œ$ÈH‰$H‹œ$ÀH‹[ ÿÓHÄPÉéÿÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$pé°þÿÿ‰é£ýÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$péCýÿÿ‰%éYüÿÿ‰é üÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$pé«ûÿÿH‰D$`H‰D$xH‹H‰D$p1íH9è„'Hœ$HÇHÇCHœ$Hƒû„÷HÇÁHÇÂH‰œ$ H‰Œ$(H‰”$0HH‰$H‹\$hH‰\$Hƒ|$„¤èH‹L$H‹D$H‹œ$ H‰$H‰Œ$ÐH‰L$H‰„$ØH‰D$èH‹D$xH‹L$pH‰Œ$àH‰ $H‰„$èH‰D$HHl$H‰ïH‰ÞH¥H¥H‹œ$ H‰\$ H‹œ$(H‰\$(H‹œ$0H‰\$0èé…ûÿÿ‰%éPÿÿÿ‰éÿÿÿHH‰$HH‰\$HH‰\$èH‹\$H‰\$pé¢þÿÿ‰%é·ùÿÿ‰éùÿÿ‰%éVùÿÿ‰éöÿÿŽ 
+00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars|go.string."id"˜,type.map[string]stringè4runtime.mapaccess1_faststrê@"".(*DockerServer).findContainerÖ +ènet/http.Error†,type.net/http.HijackerÌ$runtime.assertI2I2ÔHgo.string."cannot hijack connection"Žnet/http.Error¶ +Ò0go.string."Content-Type"øZgo.string."application/vnd.docker.raw-stream" &net/http.Header.Setê +ž +¦ + +¸ net/http.ErrorÖ type.io.Writerü runtime.convI2Iž ¬github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.Stdoutª type.[]uint8à "runtime.makeslice¼¼type.github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdWriterÎ"runtime.newobject¾2runtime.writebarrierifaceþ2runtime.writebarriersliceÎØgo.itab.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdWriter.io.Writerštype.stringØruntime.convT2EÂ2runtime.writebarrieriface–Jgo.string."Container %q is running\n"Œfmt.Fprintf®4go.string."What happened?"êØgo.itab.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdWriter.io.Writer¶type.stringâruntime.convT2EÌ2runtime.writebarrierifaceêfmt.FprintlnŒ>J°J#x1´atDjr78`­š«„«„4>J¥š@ETgclocals·fcdf49cfa428c5f0402944c8a015fd4eTgclocals·432f0e830fc39b000a597e1ecb706862ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).waitContainer€ ô eH‹ %HD$¸H;AwèëåHìÈH‹œ$èH‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$¸H‰T$H‰¬$ÀH‰l$èH‹\$ Hƒû„¯H‹ H‹kH‹œ$ÐH‰$H‰L$hH‰L$H‰l$pH‰l$èH‹\$H‰\$HH‹D$(H‹\$0H‰œ$€HƒøH‰D$xt]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$ØH‰$H‹œ$àH‰\$H‰Œ$¸H‰L$H‰„$ÀH‰D$HÇD$ ”èHÄÈÃHÇ$@BèH‹œ$ÐH‰$Hƒ<$„ÏHƒ$HèH‹„$ÐH‹l$H¶]X€û…ˆH‰$Hƒ<$„mHƒ$HèHH‰$HÇD$èH‹D$HH‹+H‰¬$¨H‹kH‰¬$°H‹\$HH‹khH‰l$8HH‰$H‰D$@H‰D$Hœ$¨H‰\$H\$8H‰\$èHH‰$H‹œ$ØH‰\$H‹œ$àH‰\$èH‹L$H‹D$ H‰Œ$˜H‰L$XH‰„$ H‰D$`HH‰$èH‹L$H‰ÏHƒùtx1ÀèH‰L$PH‰ $Hƒ<$tXH‹\$XH‰\$H‹\$`H‰\$èH‹L$PH‹D$@H‰ $H‰ÁHH‰„$ˆH‰D$H‰Œ$H‰L$èHÄÈÉ%량넉%é‡þÿÿH‰$Hƒ<$tHƒ$Hèéþÿÿ‰%ëè‰%é%þÿÿ‰éJýÿÿ4 
+*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ@"".(*DockerServer).findContainer¾ +Ðnet/http.Errorútime.Sleep¼*sync.(*RWMutex).RLock¢.sync.(*RWMutex).RUnlock°&type.map[string]intÔruntime.makemapì,go.string."StatusCode"Ä&type.map[string]int˜$runtime.mapassign1¦type.io.Writerìruntime.convI2I 4type.encoding/json.EncoderÔ "runtime.newobjectþ ð runtime.duffzeroÐ +2runtime.writebarrieriface€ &type.map[string]int¾ >encoding/json.(*Encoder).Encodeª .sync.(*RWMutex).RUnlock@"".autotmp_0762ï6type.*encoding/json.Encoder"".autotmp_07616type.*encoding/json.Encoder"".autotmp_0760_type.io.Writer"".autotmp_0759Ÿtype.int"".autotmp_0758?type.string"".autotmp_0756type.string"".autotmp_0754type.string$encoding/json.w·2ßtype.io.Writer"".result&type.map[string]int "".errŸtype.error"".containerÿbtype.*github.com/fsouza/go-dockerclient.Container +"".id¿type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer("‘¶UÀVâ "iA U ) +{Ó    *.ŽÏ*4>7 6,Tgclocals·f3828558443ce662a87feff12c09632bTgclocals·7669d68cdbc54f3d43537fbce1cd730bü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).removeContainerÀÀeH‹ %HD$ H;AwèëåHìàH‹œ$H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$ H‰T$H‰¬$¨H‰l$èH‹\$ Hƒû„UH‹ H‹kH‰L$pH‰l$xH‹œ$H‹kH‰,$èH‹D$HH‹H‰T$`H‹KH‰L$hHÇD$@HÇD$H1íH9è…>1Ò1ÉH‰T$@H‰”$€H‰L$HH‰Œ$ˆH‹œ$èH‰$H‹\$pH‰\$H‹\$xH‰\$èH‹¼$ˆH‹t$ H‹L$(H‹\$0H‰œ$˜HƒùH‰Œ$tcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$ðH‰$H‹œ$øH‰\$H‰”$ H‰T$H‰Œ$¨H‰L$HÇD$ ”èèHÄàÃL‹„$èIƒø„QI‹I‹@M‹HL‰Œ$ØH‰Œ$ÈH‰t$8H‰„$ÐH9ƃH,ñH‹m¶]X€ûtAHƒÿ…§H‹´$€H‰4$H‰|$H5LD$L‰ÇH¥H¥è¶\$ €û„pHÇD$ÌH‹œ$øH‰$H‹œ$ðH‹[0ÿÓH‹œ$èH‰$Hƒ<$„*Hƒ$HèH‹œ$èH‰$Hƒ<$„ýHƒ$HH QjèH‹”$øYYH…À…ÈH‹rH‹ +H‹BH‹jH‹l$8H9ѤHéH‰$H‰ðHÿÈH‰ÓH‹H‹KH‹kH‰¬$ØH‰”$ÈH‰Œ$ÐH9ÈsdHÂH‹+H‰l$èH‹´$èH‹NHÿÉH‹VH9Êr4H‹>H‰¼$ÈH‰>H‰Œ$ÐH‰NH‰”$ØH‰VèHÄàÃè è è èHÄàÉ%é÷þÿÿ‰%éÊþÿÿHH‹H‹kH‹œ$ðH‰$H‹œ$øH‰\$H‰T$PH‰T$H‰l$XH‰l$HÇD$ 
ôèèHÄàÃè A‰é§ýÿÿHÇ„$°HÇ„$¸HÇ„$ÀHH‰$H‰D$H‰”$ H‰T$H‰Œ$¨H‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$°H‹PH‰”$¸H‹hH‰¬$À€ûtHƒútHƒúv H‹H‹Né#üÿÿè 1Ò1Ééüÿÿ‰ë¬‰é¤ûÿÿ> +*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrÌ(net/url.(*URL).Queryä"go.string."force"Æ@"".(*DockerServer).findContainer¸ +Ênet/http.ErrorÖ&runtime.deferreturnÒgo.string."1"ô runtime.eqstringÚ +˜ +(sync.(*RWMutex).LockÞ +2sync.(*RWMutex).Unlock·fî +"runtime.deferprocâ .runtime.writebarrierptrê &runtime.deferreturn„$runtime.panicslice’$runtime.panicindex $runtime.panicindex°&runtime.deferreturnþÄgo.string."Error: API error (406): Impossible to remove a running container, please stop it first"‚net/http.ErrorŽ&runtime.deferreturn¨$runtime.panicindex’&type.net/url.Valuesâ4runtime.mapaccess2_faststrŒ$runtime.panicindex@À""".autotmp_0775type.int"".autotmp_0773type.int"".autotmp_0772type.int"".autotmp_0771type.string"".autotmp_0769type.string"".autotmp_0766type.string "".~r0¿type.stringnet/url.vs·4_type.[]stringnet/url.key·3ÿtype.string "".msgŸtype.string "".errŸtype.error"".indexÏtype.int"".force¿type.string +"".idßtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerT"À”¿Àý»¿À"¿Àn¿ÀÍà hŠ +"smDU• +#!>o6 ; · .w}:”UTgclocals·bc335ce91c3a8b5f426dd201465802bdTgclocals·986af82aaae26ef643e71b57d814342aü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".(*DockerServer).commitContainerÀ8º8eH‹ %H„$àûÿÿH;AwèëâHì H‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$H‹KH‰Œ$ HÇD$hHÇD$p1íH9è…ì 1É1ÀH‰L$hH‰D$pH‹œ$¨H‰$H‰Œ$XH‰L$H‰„$`H‰D$èH‹\$H‰\$PH‹D$(H‹\$0H‰œ$pHƒøH‰„$ht]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$°H‰$H‹œ$¸H‰\$H‰Œ$¨H‰L$H‰„$°H‰D$HÇD$ ”èHÄ ÃHÇD$XH‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$øH‹KH‰Œ$HÇ„$ˆHÇ„$1íH9è… 1É1ÀH‰Œ$ˆH‰Œ$ØH‰„$H‰„$àHƒø„úHH‰$èH‹D$H‰D$XH‰D$`H‹œ$ØH‰$H‹œ$àH‰\$èH\$H,$H‰ïH‰ÞH¥H¥H¥H‹L$`HH‰„$xH‰D$H‰Œ$€H‰L$ èH‹L$(H‹D$0H‰„$pHƒùH‰Œ$ht]H‰$H‹Y 
ÿÓH‹L$H‹D$H‹œ$°H‰$H‹œ$¸H‰\$H‰Œ$¨H‰L$H‰„$°H‰D$HÇD$ èHÄ ÃHÇD$ÈH‹œ$¸H‰$H‹œ$°H‹[0ÿÓH‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$8H‹KH‰Œ$@HÇ„$¨HÇ„$°1íH9è…†HÇ„$¨HÇ„$°H‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$H‹KH‰Œ$HÇD$xHÇ„$€1íH9è…<HÇD$xHÇ„$€HH¬$H‰ïH‰ÞèHH,$H‰ïH‰ÞH¥H¥H‹|$PHƒÿ„âH/H|$H‰îH¥H¥èH‹D$PH\$ H¬$H‰ïH‰ÞH¥H¥Hƒø„¡H¨°Hœ$ H‰ßH‰îH¥H¥H(Hœ$ØH‰ßH‰îH¥H¥H‹œ$¨H‰œ$°H‹œ$°H‰œ$¸H‹\$xH‰œ$hH‹œ$€H‰œ$pH‹\$XH‰œ$xH‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$(H‹KH‰Œ$0HÇ„$˜HÇ„$ 1íH9è…1É1ÀH‰Œ$˜H‰Œ$èH‰„$ H‰„$ðH‹œ$ÀH‹kH‰,$èH‹D$HH‹H‰”$HH‹KH‰Œ$PHÇ„$¸HÇ„$À1íH9è…×1É1ÀH‰Œ$¸H‰Œ$ÈH‰„$ÀH‰„$ÐH‹œ$¨H‰$Hƒ<$„Hƒ$xèH‹œ$¨Hƒû„mH‹S`H‹KhH‹[pH‰”$xH‰Œ$€H‰œ$ˆH‰ØH)ËHƒû}OHH‰$H‰”$`H‰T$H‰Œ$hH‰L$H‰„$pH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$hH‰„$pHH‰$H‰ÓH‰”$`H‰ÍHiíHëH‰\$Hœ$H‰\$èH‹”$`H‹Œ$hH‹„$pH‹œ$¨H‰$Hƒ<$„cHƒ$`H‰”$xH‰T$H‰Œ$€H‰L$H‰„$ˆH‰D$èH‹”$èH‹Œ$ÐH‹„$ðHƒø„´HƒùtPH‰”$¨H‰$H‰„$°H‰D$HHl$H‰ïH‰ÞH¥H¥H‹œ$ÈH‰\$ H‰L$(èH‹T$0H‹D$8H‰”$èH‰”$˜H‰„$ðH‰„$ HH‰$H‹œ$¨H‹«H‰l$Hœ$˜H‰\$Hœ$H‰\$èH‹œ$¨H‰$Hƒ<$„9Hƒ$xèHœ$ˆHÇHÇCHœ$ˆHƒû„ÿHÇÁHÇÂH‰œ$HH‰Œ$PH‰”$XHH‰$Hœ$H‰\$èH‹D$H‹L$H‹œ$HH‰$H‰„$xH‰D$H‰Œ$€H‰L$èHH‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH\$H,$H‰ïH‰ÞH¥H¥HHl$H‰ïH‰ÞH¥H¥H‹œ$HH‰\$ H‹œ$PH‰\$(H‹œ$XH‰\$0èHÄ Ééúþÿÿ‰%é»þÿÿ‰%é‘ýÿÿ‰éŒüÿÿ‰%édüÿÿHÇ„$HÇ„$ HÇ„$(HH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtOH‹H‰”$H‹IH‰Œ$ H‹kH‰¬$(<tHƒùtHƒùv H‹ +H‹Bé…ûÿÿè 1É1Àéuûÿÿ‰ë­HÇ„$èHÇ„$ðHÇ„$øHH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtOH‹H‰”$èH‹IH‰Œ$ðH‹kH‰¬$ø<tHƒùtHƒùv H‹ +H‹BéJúÿÿè 1É1Àé:úÿÿ‰ë­‰éXùÿÿ‰éùÿÿHÇ„$0HÇ„$8HÇ„$@HH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtmH‹H‰”$0H‹IH‰Œ$8H‹kH‰¬$@<t,Hƒùt&HƒùvH‹*H‰l$xH‹jH‰¬$€é$øÿÿè HÇD$xHÇ„$€éøÿÿ‰ëHÇ„$¸HÇ„$ÀHÇ„$ÈHH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtsH‹H‰”$¸H‹IH‰Œ$ÀH‹kH‰¬$È<t/Hƒùt)HƒùvH‹*H‰¬$¨H‹jH‰¬$°éÚöÿÿè HÇ„$¨HÇ„$°é¶öÿÿ‰ë‰HÇ„$ÐHÇ„$ØHÇ„$àHH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtOH‹H‰”$ÐH‹IH‰Œ$ØH‹kH‰¬$à<tHƒùtHƒùv H‹ +H‹BéPôÿÿè 1É1Àé@ôÿÿ‰ë­HÇ„$HÇ„$HÇ„$HH‰$H‰D$H‰”$¨H‰T$H‰Œ$°H‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$H‹PH‰”$H‹hH‰¬$€ûtHƒútHƒúv H‹H‹Féuòÿÿè 1É1Àéeòÿÿ‰ë¬‚ +00runtime.morestack_noctxtl(net/url.(*URL).Query„*go.string."container"Þ@"".(*DockerServer).findContainerÊ 
+Ünet/http.Error¨(net/url.(*URL).QueryÀgo.string."run"žZtype.github.com/fsouza/go-dockerclient.Config°"runtime.newobjectŠ2runtime.stringtoslicebyteÌ\type.*github.com/fsouza/go-dockerclient.ConfigŠ .encoding/json.Unmarshalâ +ô +net/http.ErrorÎ +ô (net/url.(*URL).QueryŒ go.string."m"Ú (net/url.(*URL).Queryò $go.string."author"˜""".statictmp_0817¾È runtime.duffcopyÌ go.string."img-"®*runtime.concatstring2”(net/url.(*URL).Query¬ go.string."repo"’(net/url.(*URL).Queryªgo.string."tag"¨(sync.(*RWMutex).Lockº\type.[]github.com/fsouza/go-dockerclient.Image¬"runtime.growslice„Xtype.github.com/fsouza/go-dockerclient.Imageê.runtime.writebarrierfatª2runtime.writebarriersliceºgo.string.":"†*runtime.concatstring3è,type.map[string]stringÖ$runtime.mapassign1˜ ,sync.(*RWMutex).UnlockÄ!type.stringð!runtime.convT2EÚ"2runtime.writebarrierifaceè"type.io.Writer®#runtime.convI2Iâ#.go.string."{\"ID\":%q}"Ø$fmt.Fprintf¢&&type.net/url.Valuesò&4runtime.mapaccess2_faststr¦($runtime.panicindexš)&type.net/url.Valuesê)4runtime.mapaccess2_faststrž+$runtime.panicindex®,&type.net/url.Valuesþ,4runtime.mapaccess2_faststrÌ.$runtime.panicindexâ/&type.net/url.Values²04runtime.mapaccess2_faststr†2$runtime.panicindex¢3&type.net/url.Valuesò34runtime.mapaccess2_faststr¦5$runtime.panicindexš6&type.net/url.Valuesê64runtime.mapaccess2_faststr”8$runtime.panicindex@À~"".autotmp_0828"type.interface {}"".autotmp_0826¯ &type.[]interface {}"".autotmp_0822type.int"".autotmp_0821ÿ\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0820Ï\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0819type.*[]string"".autotmp_0818type.*[]string"".autotmp_0816type.*[]string"".autotmp_0815type.*[]string"".autotmp_0814type.*[]string"".autotmp_0812¯ (type.[1]interface {}"".autotmp_0811 
type.string"".autotmp_0810type.string"".autotmp_0809\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0808type.int"".autotmp_0807type.string"".autotmp_0806&type.net/url.Values"".autotmp_0805type.int"".autotmp_0804type.string"".autotmp_0803&type.net/url.Values"".autotmp_0802type.int"".autotmp_0801type.string"".autotmp_0800&type.net/url.Values"".autotmp_0799type.int"".autotmp_0798type.string"".autotmp_0797&type.net/url.Values"".autotmp_0796type.string"".autotmp_0794\type.*github.com/fsouza/go-dockerclient.Config"".autotmp_0793ÿ\type.*github.com/fsouza/go-dockerclient.Config"".autotmp_0792type.int"".autotmp_0791type.string"".autotmp_0790&type.net/url.Values"".autotmp_0789type.string"".autotmp_0787ï type.string "".~r0Ïtype.stringnet/url.vs·4 +type.[]stringnet/url.key·3¯ type.string "".~r0type.stringnet/url.vs·4ï +type.[]stringnet/url.key·3ï type.string "".~r0Ïtype.stringnet/url.vs·4ß type.[]stringnet/url.key·3¯type.string "".~r0ïtype.stringnet/url.vs·4Ï type.[]stringnet/url.key·3Ï type.string "".~r0¯type.stringnet/url.vs·4Ÿ type.[]stringnet/url.key·3Ïtype.string "".~r0ïtype.stringnet/url.vs·4¿ +type.[]stringnet/url.key·3type.string "".tag¯type.string"".repositoryïtype.string"".imageŸXtype.github.com/fsouza/go-dockerclient.Image"".runConfigtype.string"".config\type.*github.com/fsouza/go-dockerclient.Config "".errï type.error"".containerŸbtype.*github.com/fsouza/go-dockerclient.Container +"".id type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer8%À”¿À‹¿Àñ ¿Àè  â² +%cGU wzU# +sm 3£!Ù +P^!   
¼¼°  +  +³¼ ºˆ5ž„-@-v…ª³‹á`nh!l +´ Z¬gsjvZÎTgclocals·9a68f8414e7859befd5e7341b374c125Tgclocals·4ffc9bba9f386a0dab8c46db5f1d911aü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).findContainerÀ¢eH‹ %HD$ÐH;AwèëåHì°HÇ„$ÐHÇ„$ØHÇ„$àHÇ„$èH‹œ$¸H‰$Hƒ<$„Hƒ$HèH‹œ$¸H‰$Hƒ<$„ïHƒ$HH QjèH‹”$ØYYH…À…ºH‹œ$¸Hƒû„¡L‹H‹CH‹kH‰¬$¨1öH‰„$ H‰D$0L‰„$˜H‹l$0H9îGL‰D$PI‹8H‰t$8H‰t$(H‰|$@Hƒÿ„AH‹H‹GH9Ð…ŒH‰Œ$ˆH‰ $H‰„$H‰D$H‹¬$ÀH‰l$H‰T$èL‹D$PH‹|$@H‹t$8H‹”$ȶ\$ €ût;H‰¼$ÐH‹\$(H‰œ$ØHÇ„$àHÇ„$èèHÄ°ÃH‰ûHƒÿ„‘H‹¿ H‰¼$ˆH‹‹(H‰Œ$H9ÑuEH‰<$H‰L$H‹¬$ÀH‰l$H‰T$èL‹D$PH‹|$@H‹t$8H‹”$ȶ\$ €û…PÿÿÿIƒÀHÿÆH‹l$0H9þÿÿHH‹+H‰l$hH‹kH‰l$pHÇD$XHÇD$`HH‰$èH‹L$H‰L$HH‰ $Hƒ<$„µH‹\$hH‰\$H‹\$pH‰\$èH‹\$HH‰\$HH‹ 1íH9étRH‹T$HH‰L$xH‰”$€HÇ„$ÐHÇ„$ØÿÿÿÿH‰L$XH‰Œ$àH‰T$`H‰”$èèHÄ°ÃHH‰$HH‰\$HH‰\$èH‹L$é|ÿÿÿ‰%é?ÿÿÿ‰éhþÿÿ‰é¸ýÿÿ‰éXýÿÿèHİÉ%éýÿÿ‰%éØüÿÿ& +*0runtime.morestack_noctxtÞ*sync.(*RWMutex).RLock¤4sync.(*RWMutex).RUnlock·f´"runtime.deferproc° runtime.eqstringØ&runtime.deferreturnˆ runtime.eqstringŠ :go.string."No such container"Þ .type.errors.errorStringð "runtime.newobjectÔ +4runtime.writebarrierstringö +Bgo.itab.*errors.errorString.error˜ &runtime.deferreturn¶ 0type.*errors.errorStringÌ type.errorä Bgo.itab.*errors.errorString.errorø  runtime.typ2ItabÚ &runtime.deferreturnpà$"".autotmp_0846otype.error"".autotmp_0845Ï0type.*errors.errorString"".autotmp_0844type.string"".autotmp_0843Otype.string"".autotmp_0841¿dtype.**github.com/fsouza/go-dockerclient.Container"".autotmp_0840ÿtype.int"".autotmp_0839ïtype.int"".autotmp_08380type.*errors.errorString"".autotmp_0837/ftype.[]*github.com/fsouza/go-dockerclient.Container "".~r0¯type.errorerrors.text·2type.string"".containerßbtype.*github.com/fsouza/go-dockerclient.Container"".itype.int "".~r3Ptype.error "".~r2@type.int "".~r10btype.*github.com/fsouza/go-dockerclient.Container"".idOrNametype.string"".s*type.*"".DockerServerD"àußàßßà`ßà' :‚ R!>\p;u 
+”,néSYt2a105Tgclocals·8bd789dcce9d4daa4c4bb84dfe47e247Tgclocals·4459bbba29917b2fee408f0dbbff89b1ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).buildImageÀ®eH‹ %H„$øüÿÿH;AwèëâHìˆH‹´$¨H‹~8H‰<$H5H|$H¥H¥èH‹L$H‹D$ H‰Œ$H‰Œ$¸H‰„$H‰„$ÀHƒø…¦H‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €û„tÆD$GHH‰$H‹¼$¨Hƒÿ„þHo@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$ØH‰L$xH‰„$àH‰„$€HH‰$èH‹|$H‰ùHƒÿ„™1ÀèH‰L$PH‰ $Hƒ<$„rH‹\$xH‰\$H‹œ$€H‰\$èH‹\$PH‰\$HH‹\$HH‰$èH‹D$H‹L$H‹\$H‰œ$°HƒùH‰Œ$¨„®€|$GuuHÇD$H‹œ$ H‰$H‹œ$˜H‹[0ÿÓHH,$H‰ïH‰ÞH¥H¥èH\$Hl$H‰ïH‰ÞH¥H¥H¥H‹œ$ H‰$H‹œ$˜H‹[(ÿÓHĈÃH‹œ$H‰$èH‹\$H‰œ$H‹\$H‰œ$èH‹,$‹T$H‹L$H¼$x1ÀèH‹œ$H‰œ$xH‹œ$H‰œ$€H‰¬$¨‰”$°H‰Œ$¸H‹œ$¨H‹kH‰,$èH‹D$H‹œ$xH‰œ$ˆH‹œ$€H‰œ$HH‹H‰”$˜H‹KH‰Œ$ HÇD$XHÇD$`1íH9è…ƒ1É1ÀH‰L$XH‰L$hH‰D$`H‰D$pHƒøtH‰Œ$ˆH‰„$H‹œ$H‰$Hƒ<$„2Hƒ$xèH‹œ$Hƒû„H‹S`H‹KhH‹[pH‰”$`H‰Œ$hH‰œ$pH‰ØH)ËHƒû}OHH‰$H‰”$HH‰T$H‰Œ$PH‰L$H‰„$XH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$PH‰„$XHH‰$H‰ÓH‰”$HH‰ÍHiíHëH‰\$Hœ$xH‰\$èH‹”$HH‹Œ$PH‹„$XH‹œ$H‰$Hƒ<$„Hƒ$`H‰”$`H‰T$H‰Œ$hH‰L$H‰„$pH‰D$èH‹œ$ˆH‰œ$øH‹œ$H‰œ$HH‰$H‹œ$H‹«H‰l$Hœ$øH‰\$Hœ$xH‰\$èH‹œ$H‰$Hƒ<$„SHƒ$xèHœ$èHÇHÇCHœ$èHƒû„HÇÁHÇÂH‰œ$0H‰Œ$8H‰”$@HH‰$Hœ$xH‰\$èH‹L$H‹D$H‹œ$0H‰$H‰Œ$ÈH‰L$H‰„$ÐH‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$0H‰\$H‹œ$8H‰\$H‹œ$@H‰\$ èH‹L$(H‹D$0H‰Œ$H‰ $H‰„$H‰D$èH\$Hl$H‰ïH‰ÞH¥H¥H¥H‹œ$ H‰$H‹œ$˜H‹[(ÿÓHĈÉéàþÿÿ‰%é¡þÿÿ‰%éïýÿÿ‰éêüÿÿ‰%éÂüÿÿHÇ„$HÇ„$ HÇ„$(HH‰$H‰D$H‰”$H‰T$H‰Œ$H‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$H‹PH‰”$ H‹hH‰¬$(€ûtHƒútHƒúv H‹H‹FéÞûÿÿè 1É1ÀéÎûÿÿ‰ë¬Hƒøt]H‹H‰Œ$H‹@H‰„$Hƒø +…ôùÿÿH‰ $H‰D$H-LD$L‰ÇH‰îH¥H¥è¶\$ €û„ÂùÿÿÆD$Gé¸ùÿÿ‰ëŸ‰%é‚ùÿÿ‰é`ùÿÿ‰éûøÿÿV +00runtime.morestack_noctxtp0go.string."Content-Type"Œ&net/http.Header.Get”6go.string."application/tar"¼ runtime.eqstringðtype.io.ReaderÆruntime.convI2I¢.type.archive/tar.Reader´"runtime.newobjectæä runtime.duffzeroÆ2runtime.writebarrierifaceö4archive/tar.(*Reader).Next  +ª6go.string."miss Dockerfile"Ð2runtime.stringtoslicebyte´ +â:"".(*DockerServer).generateID  time.NowØ ø runtime.duffzeroð +(net/url.(*URL).QueryÈ go.string."t"Î 
(sync.(*RWMutex).Lockà\type.[]github.com/fsouza/go-dockerclient.ImageÒ"runtime.growsliceªXtype.github.com/fsouza/go-dockerclient.Image.runtime.writebarrierfatÐ2runtime.writebarrierslicež,type.map[string]stringŒ$runtime.mapassign1Î,sync.(*RWMutex).Unlockútype.string¦runtime.convT2E2runtime.writebarrierifacežBgo.string."Successfully built %s"’fmt.Sprintfâ2runtime.stringtoslicebyteÆ +Œ&type.net/url.ValuesÜ4runtime.mapaccess2_faststr†$runtime.panicindex’,go.string."Dockerfile"º runtime.eqstring@<"".autotmp_0872ÿ +"type.interface {}"".autotmp_0870¯ &type.[]interface {}"".autotmp_0866type.int"".autotmp_0865ÿ\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0864Ï\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0862type.string"".autotmp_0861ï 0type.*archive/tar.Reader"".autotmp_08600type.*archive/tar.Reader"".autotmp_0859ß +type.io.Reader"".autotmp_0858type.string"".autotmp_0857¿ +(type.[1]interface {}"".autotmp_0856Ÿ +type.string"".autotmp_0855\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0853type.string"".autotmp_0850type.string"".autotmp_0849ÿ type.string "".~r0ß type.stringnet/url.vs·4ß type.[]stringnet/url.key·3ß type.string archive/tar.r·2Ÿ type.io.Reader"".t¿ type.string"".repositoryÿ type.string"".imageŸXtype.github.com/fsouza/go-dockerclient.Image "".err¿ type.error +"".trÿ 0type.*archive/tar.Reader "".gotDockerFile type.bool +"".ctŸ type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer*%þÈóà’˜ %‹¼%#J ++F + Y!Á^!º  ·#Y  #REÔIVh¯á`^!lvåUZDTgclocals·f691ab09c838bb4c8855d6461c0f447dTgclocals·089441843964391e375901e8fc7a3c0cü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ8"".(*DockerServer).pullImage eH‹ 
%H„$øüÿÿH;AwèëâHìˆH‹œ$¨H‹kH‰,$èH‹D$HH‹H‰T$pH‹KH‰L$xHÇD$@HÇD$H1íH9è…1É1ÀH‰L$@H‰Œ$H‰D$HH‰„$˜H‹œ$¨H‹kH‰,$èH‹D$HH‹H‰”$€H‹KH‰Œ$ˆHÇD$PHÇD$X1íH9è…î1É1ÀH‰L$PH‰L$`H‰D$XH‰D$hH‹œ$H‰$èH‹T$H‹L$H¼$x1ÀèH‰”$xH‰Œ$€H‹œ$H‰$Hƒ<$„yHƒ$xèH‹œ$Hƒû„VH‹S`H‹KhH‹[pH‰”$@H‰Œ$HH‰œ$PH‰ØH)ËHƒû}OHH‰$H‰”$(H‰T$H‰Œ$0H‰L$H‰„$8H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$0H‰„$8HH‰$H‰ÓH‰”$(H‰ÍHiíHëH‰\$Hœ$xH‰\$èH‹”$(H‹Œ$0H‹„$8H‹œ$H‰$Hƒ<$„LHƒ$`H‰”$@H‰T$H‰Œ$HH‰L$H‰„$PH‰D$èH‹”$H‹L$hH‹„$˜Hƒø„ÇHƒù„_H‰”$ÀH‰„$ÈH‹\$`H‰œ$°H‰Œ$¸H¼$X1ÀèHœ$XHƒû„¥HÇÁHÇÂH‰œ$H‰Œ$H‰”$ HH‰$Hœ$ÀH‰\$èH‹L$H‹D$H‹œ$H‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH‰$Hœ$°H‰\$èH‹L$H‹D$H‹œ$HƒÃH‰$H‰Œ$ H‰L$H‰„$¨H‰D$èHH,$H‰ïH‰ÞH¥H¥H‹œ$H‰\$H‹œ$H‰\$H‹œ$ H‰\$ èH‹T$(H‹D$0H‰”$H‰”$ÀH‰„$˜H‰„$ÈHH‰$H‹œ$H‹«H‰l$Hœ$ÀH‰\$Hœ$xH‰\$èH‹œ$H‰$Hƒ<$tHƒ$xèHĈÉ%ëå‰éTþÿÿ‰%é¨ýÿÿ‰é£üÿÿ‰%é{üÿÿHÇ„$øHÇ„$HÇ„$HH‰$H‰D$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtOH‹H‰”$øH‹IH‰Œ$H‹kH‰¬$<tHƒùtHƒùv H‹ +H‹Bénûÿÿè 1É1Àé^ûÿÿ‰ë­HÇ„$àHÇ„$èHÇ„$ðHH‰$H‰D$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$àH‹PH‰”$èH‹hH‰¬$ð€ûtHƒútHƒúv H‹H‹FéDúÿÿè 1É1Àé4úÿÿ‰ë¬@ +00runtime.morestack_noctxtl(net/url.(*URL).Query„*go.string."fromImage"Æ(net/url.(*URL).QueryÞgo.string."tag"˜:"".(*DockerServer).generateIDÊø runtime.duffzero¬(sync.(*RWMutex).Lock¾\type.[]github.com/fsouza/go-dockerclient.Image°"runtime.growsliceˆXtype.github.com/fsouza/go-dockerclient.Imageî.runtime.writebarrierfat® +2runtime.writebarriersliceè ð runtime.duffzeroæ type.string’ runtime.convT2Eü 2runtime.writebarrierifaceŠtype.string¶runtime.convT2E¨2runtime.writebarrieriface¶"go.string."%s:%s"ªfmt.SprintfŒ,type.map[string]stringú$runtime.mapassign1´,sync.(*RWMutex).Unlockø&type.net/url.ValuesÈ4runtime.mapaccess2_faststrü$runtime.panicindexð&type.net/url.ValuesÀ4runtime.mapaccess2_faststrê$runtime.panicindex@<"".autotmp_0903"type.interface {}"".autotmp_0902Ï "type.interface {}"".autotmp_0900ï &type.[]interface {}"".autotmp_0896type.int"".autotmp_0895¿ \type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0894 
\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0893type.*[]string"".autotmp_0891type.string"".autotmp_0890type.string"".autotmp_0889¯ type.string"".autotmp_0888 type.string"".autotmp_0887ß(type.[2]interface {}"".autotmp_0886\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0885type.string"".autotmp_0884type.int"".autotmp_0883type.string"".autotmp_0882&type.net/url.Values"".autotmp_0880ï +type.string "".~r0ï type.stringnet/url.vs·4Ÿ +type.[]stringnet/url.key·3 type.string "".~r0 type.stringnet/url.vs·4Ï +type.[]stringnet/url.key·3¯ type.string"".imageŸXtype.github.com/fsouza/go-dockerclient.Image "".tagÏ type.string "".fromImageNameï type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer%€ ªÐ Xâ %mm!Ö + +ß^  ¼¿@5miJá`²ÌhŠ ZÓTgclocals·908986cc2bd23e6b2b43c6b331d27560Tgclocals·4fcfe95894f9cd95ba1d5aada184916cü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ8"".(*DockerServer).pushImageàÂeH‹ %HD$H;AwèëåHìðH‹œ$H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$°H‰T$H‰¬$¸H‰l$èH‹\$ Hƒû„H‹ H‹kH‰L$`H‰l$hH‹œ$H‹kH‰,$èH‹D$HH‹H‰T$pH‹KH‰L$xHÇD$@HÇD$H1íH9è…ÿ1É1ÀH‰L$@H‰L$PH‰D$HH‰D$XHƒøtYH‹|$`H‹T$hH‰¼$°H‰<$H‰”$¸H‰T$HH|$H‰ÞH¥H¥H‰L$ H‰D$(èH‹\$0H‰\$`H‹\$8H‰\$hH‹œ$øH‰$Hƒ<$„eHƒ$xèH‹L$`H‹D$hHH‰$H‹œ$øH‹«H‰l$H‰Œ$°H‰L$H‰„$¸H‰D$èH‹”$øH‹L$ ¶\$(Hƒù„ð€ûu`H‰$Hƒ<$tLHƒ$xèH‹œ$H‰$H‹´$H‰t$H5Hl$H‰ïH¥H¥HÇD$ ”èHÄðÉ%ë«H‰$Hƒ<$„pHƒ$xèHH‹+H‰¬$H‹kH‰¬$˜Hœ$ HÇHÇCHœ$ Hƒû„HÇÂHÇÁH‰œ$ØH‰”$àH‰Œ$èHH‰$Hœ$H‰\$èH‹L$H‹D$H‹œ$ØH‰$H‰Œ$€H‰L$H‰„$ˆH‰D$èHH‰$H‹œ$H‰\$H‹œ$H‰\$èH\$H,$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$H‹œ$àH‰\$H‹œ$èH‰\$ èHH‹+H‰¬$H‹kH‰¬$˜Hœ$ HÇHÇCHœ$ Hƒû„éHÇÁHÇÂH‰œ$ØH‰Œ$àH‰”$èHH‰$Hœ$H‰\$èH‹L$H‹D$H‹œ$ØH‰$H‰Œ$€H‰L$H‰„$ˆH‰D$èHH‰$H‹œ$H‰\$H‹œ$H‰\$èH\$H,$H‰ïH‰ÞH¥H¥H‹œ$ØH‰\$H‹œ$àH‰\$H‹œ$èH‰\$ èHÄðÉéÿÿÿ‰éáýÿÿ‰%é„ýÿÿ‰é ýÿÿ‰%éüÿÿHÇ„$ÀHÇ„$ÈHÇ„$ÐHH‰$H‰D$H‰”$°H‰T$H‰Œ$¸H‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$ÀH‹PH‰”$ÈH‹hH‰¬$ЀûtHƒútHƒúv H‹H‹Fébûÿÿè 1É1ÀéRûÿÿ‰ë¬‰éãúÿÿD 
+*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsv go.string."name"’,type.map[string]stringâ4runtime.mapaccess1_faststrÌ(net/url.(*URL).Queryägo.string."tag"Ðgo.string.":"†*runtime.concatstring3ð*sync.(*RWMutex).RLock’,type.map[string]string€4runtime.mapaccess2_faststrì.sync.(*RWMutex).RUnlock¬2go.string."No such image"ànet/http.Error´ .sync.(*RWMutex).RUnlock ,go.string."Pushing..."œ type.stringÈ runtime.convT2E² 2runtime.writebarrierifaceÀ type.io.Writer† runtime.convI2I„fmt.Fprintln’$go.string."Pushed"ìtype.string˜runtime.convT2E‚2runtime.writebarrierifacetype.io.WriterÖruntime.convI2IÔfmt.Fprintln”&type.net/url.Valuesä4runtime.mapaccess2_faststrŽ$runtime.panicindex@à*"".autotmp_0929"type.interface {}"".autotmp_0928*type.*[1]interface {}"".autotmp_0927&type.[]interface {}"".autotmp_0926ß"type.interface {}"".autotmp_0924/&type.[]interface {}"".autotmp_0921type.string"".autotmp_0920(type.[1]interface {}"".autotmp_0919¿type.string"".autotmp_0918Ÿ(type.[1]interface {}"".autotmp_0917type.string"".autotmp_0916type.string"".autotmp_0914type.string"".autotmp_0911type.string "".~r0ßtype.stringnet/url.vs·4_type.[]stringnet/url.key·3ÿtype.string "".tag¿type.string"".nameŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer*"à™ßàùßàú° b„ "sb Y!i: ¨¨  ·>.w5H¤žŠžˆU*Tgclocals·cec9627e2837f98af62e9c7580b3baccTgclocals·a296c3305421c42d8fb7cfcda86446d7ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ6"".(*DockerServer).tagImage †eH‹ %H„$pÿÿÿH;AwèëâHìH‹œ$0H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰´$ÐH‰t$H‰¬$ØH‰l$èH‹\$ Hƒû„uH‹H‹kH‰”$€H‰¬$ˆH‹œ$H‰$Hƒ<$„;Hƒ$xèH‹”$€H‹Œ$ˆHH‰$H‹œ$H‹«H‰l$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹¬$H‹T$ ¶\$(Hƒú„À€ûufH‰,$Hƒ<$tRHƒ$xèH‹œ$ H‰$H‹´$(H‰t$H5Hl$H‰ïH¥H¥HÇD$ ”èèHÄÉ%ë¥H‰,$Hƒ<$„:Hƒ$xèH‹œ$H‰$Hƒ<$„ Hƒ$xèH‹œ$H‰$Hƒ<$„àHƒ$xH QjèYYH…À…³H‹œ$0H‹kH‰,$èH‹D$HH‹H‰”$ 
H‹KH‰Œ$¨HÇD$@HÇD$H1íH9è…§1É1ÀH‰L$@H‰L$pH‰D$HH‰D$xH‹œ$0H‹kH‰,$èH‹D$HH‹H‰”$H‹KH‰Œ$˜HÇD$PHÇD$X1íH9è…{1Ò1ÉH‰T$PH‰T$`H‰L$XH‰L$hHƒùtYH‹t$pH‹|$xH‰´$ÐH‰4$H‰¼$ØH‰|$HH|$H‰ÞH¥H¥H‰T$ H‰L$(èH‹\$0H‰\$pH‹\$8H‰\$xH‹\$pH‰œ$ÀH‹\$xH‰œ$ÈH‹”$€H‹Œ$ˆHH‰$H‹œ$H‹«H‰l$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹\$ Hƒû„†H‹+H‰¬$°H‹kH‰¬$¸HH‰$H‹œ$H‹«H‰l$Hœ$ÀH‰\$Hœ$°H‰\$èHÇD$ÉH‹œ$(H‰$H‹œ$ H‹[0ÿÓèHÄÉésÿÿÿHÇ„$øHÇ„$HÇ„$HH‰$H‰D$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹L$ ¶\$(H‰ØH‰ËHƒùtRH‹H‰”$øH‹IH‰Œ$H‹kH‰¬$<t"HƒùtH‰ÓHƒùv H‹H‹KéÞýÿÿè 1Ò1ÉéÎýÿÿ‰ëªHÇ„$àHÇ„$èHÇ„$ðHH‰$H‰D$H‰”$ÐH‰T$H‰Œ$ØH‰L$èH‹D$ ¶\$(HƒøtPH‹0H‰´$àH‹PH‰”$èH‹hH‰¬$ð€ûtHƒútHƒúv H‹H‹Féºüÿÿè 1É1Àéªüÿÿ‰ë¬èHÄÉ%éüÿÿ‰%éçûÿÿ‰%éºûÿÿ‰é9ûÿÿ‰%é¹úÿÿ‰é„úÿÿH +00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars| go.string."name"˜,type.map[string]stringè4runtime.mapaccess1_fastströ*sync.(*RWMutex).RLock¤,type.map[string]string’4runtime.mapaccess2_faststrþ.sync.(*RWMutex).RUnlock¾2go.string."No such image"ònet/http.Errorþ&runtime.deferreturnÒ.sync.(*RWMutex).RUnlock”(sync.(*RWMutex).LockÚ2sync.(*RWMutex).Unlock·fê"runtime.deferprocª(net/url.(*URL).Query go.string."repo"„ +(net/url.(*URL).Queryœ +go.string."tag"” go.string.":"Ê *runtime.concatstring3Ô ,type.map[string]stringÂ4runtime.mapaccess1_faststrœ,type.map[string]stringŠ$runtime.mapassign1Ô +Ü&runtime.deferreturnÐ&type.net/url.Values 4runtime.mapaccess2_faststrÚ$runtime.panicindexÎ&type.net/url.Valuesž4runtime.mapaccess2_faststrÈ$runtime.panicindexò&runtime.deferreturn@ ."".autotmp_0952type.*[]string"".autotmp_0949¿type.string"".autotmp_0948type.string"".autotmp_0947Ÿtype.string"".autotmp_0946type.string"".autotmp_0945type.int"".autotmp_0944type.string"".autotmp_0943&type.net/url.Values"".autotmp_0941type.string"".autotmp_0939type.string"".autotmp_0937type.string "".~r0ÿtype.stringnet/url.vs·4/type.[]stringnet/url.key·3ÿtype.string 
"".~r0Ÿtype.stringnet/url.vs·4_type.[]stringnet/url.key·3ßtype.string"".newTagßtype.string"".newRepo¿type.string"".nameŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerF% åŸ g¾Ÿ ŠŸ K v¦ %y!o: !6mh YÌ#¿·   B1‰„jÙ£|d&e]bUmTgclocals·8375af20f91e3bf26f9f4b100ffb7d0eTgclocals·d85b7b7d175a77beb9795a1053a5aaecü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ<"".(*DockerServer).removeImageàÞeH‹ %H„$ ÿÿÿH;AwèëâHì`H‹œ$€H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$¸H‰T$H‰¬$ÀH‰l$èH‹\$ Hƒû„!H‹ H‹kH‰Œ$ˆH‰¬$H‹œ$hH‰$Hƒ<$„çHƒ$xèHÇD$XHÇD$`H‹Œ$ˆH‹„$HH‰$H‹œ$hH‹«H‰l$H‰Œ$¸H‰L$H‰„$ÀH‰D$èH‹L$ ¶\$(Hƒù„bH‹H‰T$xH‹IH‰Œ$€€ût:H‹¬$ˆH‹„$H‰”$ˆH‰Œ$H‰¬$¸H‰l$XH‰„$ÀH‰D$`HÇ„$ÈHÇ„$ÐHÇ„$ØH‹œ$hH‹«H¼$1ÀèHH‰$H‰l$Hœ$H‰\$èH‹”$H‹œ$1íH9네H‹œ$Hƒû„wH‹ H‹CH‹œ$Hƒû„WH‹+H‰l$hH‹kH‰l$pH‰Œ$¸H‰L$HH‰„$ÀH‰D$PH9Ð…H‰ $H‰D$H‹¬$ˆH‰l$H‰T$è¶\$ €û„ëH‹”$ÈH‹Œ$ÐH‹œ$ØH‰ØH)ËHƒû}OHH‰$H‰”$øH‰T$H‰Œ$H‰L$H‰„$H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$H‰„$H‰ÓH‰”$øH‰ÍHkíHëH‰$H‹\$hH‰\$H‹\$pH‰\$èH‹”$øH‹Œ$H‹„$H‰”$ÈH‰Œ$ÐH‰„$ØHœ$H‰$èH‹”$H‹œ$1íH9ë…\þÿÿH‹œ$hH‰$Hƒ<$„»Hƒ$xèH‹œ$hH‰$H‹œ$ˆH‰\$H‹œ$H‰\$èH‹\$(H‰\$@H‹L$0H‹\$8H‰œ$ HƒùH‰Œ$˜tcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$pH‰$H‹œ$xH‰\$H‰”$¸H‰T$H‰Œ$ÀH‰L$HÇD$ ”èèHÄ`ÃHÇD$ÌH‹œ$xH‰$H‹œ$pH‹[0ÿÓH‹œ$hH‰$Hƒ<$„³Hƒ$xèH‹œ$hH‰$Hƒ<$„†Hƒ$xH QjèH‹”$xYYH…À…QH‹œ$ÐHƒû=H‹rhHH‰$H‹Z`H‹BhH‹jpH‹l$@H9Ń HiíHëH‰\$H‰ðHÿÈH‰ÓH‹R`H‹KhH‹kpH‰¬$ðH‰ÓH‰”$àH‰ÅH‰Œ$èH9ȃ»HiíHëH‰\$èH‹”$hH‹JhHÿÉH‹rpH9΂„H‹z`H‰¼$àH‰z`H‰Œ$èH‰JhH‰´$ðH‰rpH‹\$`HƒûtCH‹\$XH‰œ$¨H‹\$`H‰œ$°HH‰$H‹ªH‰l$Hœ$¨H‰\$èèHÄ`Ãè è è ëèHÄ`É%énþÿÿ‰%éAþÿÿ‰%é9ýÿÿ‰é¢ûÿÿ‰é‚ûÿÿ‰é—úÿÿ‰%é úÿÿ‰éØùÿÿF +00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars|go.string."id"˜,type.map[string]stringè4runtime.mapaccess1_fastströ*sync.(*RWMutex).RLockÈ,type.map[string]string¶4runtime.mapaccess2_faststrˆØ runtime.duffzero–,type.map[string]stringÌ&runtime.mapiterinit€ + runtime.eqstringò +type.[]stringä "runtime.growslice’ 
4runtime.writebarrierstring”&runtime.mapiternextŒ.sync.(*RWMutex).RUnlockâ@"".(*DockerServer).findImageByIDÎ +ànet/http.Errorì&runtime.deferreturnÆ +„(sync.(*RWMutex).LockÊ2sync.(*RWMutex).Unlock·fÚ"runtime.deferprocºXtype.github.com/fsouza/go-dockerclient.Image´.runtime.writebarrierfat”,type.map[string]stringØ"runtime.mapdeleteä&runtime.deferreturnþ$runtime.panicsliceŒ$runtime.panicindexš$runtime.panicindex®&runtime.deferreturn@À4"".autotmp_0975type.uint64"".autotmp_0974type.uint64"".autotmp_0973type.int"".autotmp_0968Ïtype.[]string"".autotmp_0967type.string"".autotmp_0966type.string"".autotmp_0964ïtype.string"".autotmp_0963type.int"".autotmp_0962type.int"".autotmp_0961type.int"".autotmp_0960type.string"".autotmp_0958Ÿ6type.map.iter[string]string"".autotmp_0957,type.map[string]string"".autotmp_0956type.string"".autotmp_0954Ïtype.string "".errtype.error"".index¿type.int"".taggedID¯type.string "".tagïtype.string"".tags¯type.[]string "".imgÏtype.string "".tagtype.string +"".id¯type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerD%ÀÜ¿Àh¿À$¿ÀM° –Î %y!{:$¬<ë, +!GU#!>< C    N1‰`ËšrWA<+7|ö’€Tgclocals·f691ab09c838bb4c8855d6461c0f447dTgclocals·ffed5bc34d491524ef9a5d2e5f55f2f5ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ>"".(*DockerServer).inspectImageÀ¶eH‹ %H„$ùÿÿH;AwèëâHìðH‹œ$H‰$èH‹L$HH‹3H‹kHH‰$H‰L$H‰´$˜H‰t$H‰¬$ H‰l$èH‹\$ Hƒû„ H‹H‹kH‰T$hH‰l$pH‹œ$øH‰$Hƒ<$„ÙHƒ$xèH‹œ$øH‰$Hƒ<$„¬Hƒ$xH QjèYYH…À…H‹L$hH‹D$pHH‰$H‹œ$øH‹«H‰l$H‰Œ$˜H‰L$H‰„$ H‰D$èH‹L$ ¶\$(Hƒù„H‹)H‰l$xL‹IL‰Œ$€€û„©H‹œ$øHƒû„æH‹S`H‹ChH‹kpH‰¬$¸E1ÀH‰„$°H‰D$0H‰”$¨H‰ÑH‹l$0I9èZH‰T$PHƒú„“H¬$ÐH‰ïH‰ÖèL‰D$8Hœ$ÐH¬$ÀH‰ïH‰ÞèH‹´$ÀH‰´$˜H‹„$ÈH‰„$ L9È…×H‰4$H‰D$H‹l$xH‰l$L‰L$èL‹Œ$€L‹D$8H‹T$P¶\$ €û„šH‹œ$H‰$H‹œ$H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$H‰$H‹œ$H‹[0ÿÓHH‰$H‹œ$H‰\$H‹œ$H‰\$èH‹T$H‹D$ 
H‰”$ˆH‰T$XH‰„$H‰D$`HH‰$èH‹|$H‰úHƒÿ„¯1ÀèH‰T$HH‰$Hƒ<$„ˆH‹\$XH‰\$H‹\$`H‰\$èH‹t$HH‰t$@H´$ÀH¬$àH‰ïèHH‰$Hœ$àH‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$@H‰$èèHÄðÉ%élÿÿÿ‰éJÿÿÿHÂIÿÀH‹l$0I9茦ýÿÿH‹œ$H‰$H‹´$H‰t$H5Hl$H‰ïH¥H¥HÇD$ ”èèHÄðÉéfýÿÿ‰éýÿÿ‰éÝüÿÿèHÄðÉ%éHüÿÿ‰%éüÿÿ‰éìûÿÿD +00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars| go.string."name"˜,type.map[string]stringè4runtime.mapaccess1_faststrê*sync.(*RWMutex).RLock°4sync.(*RWMutex).RUnlock·fÀ"runtime.deferprocø,type.map[string]stringæ4runtime.mapaccess2_faststrªÈ runtime.duffcopyêÈ runtime.duffcopyö runtime.eqstringî +Š +0go.string."Content-Type"° +8go.string."application/json"Ø +&net/http.Header.Set¢ +¬ type.io.Writerò runtime.convI2IÈ 4type.encoding/json.EncoderÚ "runtime.newobjectŒ ð runtime.duffzeroæ 2runtime.writebarrierifaceªÈ runtime.duffcopy¸Xtype.github.com/fsouza/go-dockerclient.Imageäruntime.convT2E¨>encoding/json.(*Encoder).Encode´&runtime.deferreturnÚ*go.string."not found"Žnet/http.Errorš&runtime.deferreturnà&runtime.deferreturn@à("".autotmp_1001Ï6type.*encoding/json.Encoder"".autotmp_10006type.*encoding/json.Encoder"".autotmp_0999Ïtype.io.Writer"".autotmp_0998type.string"".autotmp_0997¿Xtype.github.com/fsouza/go-dockerclient.Image"".autotmp_0996¿Ztype.*github.com/fsouza/go-dockerclient.Image"".autotmp_0995ÿtype.int"".autotmp_0994ïtype.int"".autotmp_0992ŸXtype.github.com/fsouza/go-dockerclient.Image"".autotmp_0990\type.[]github.com/fsouza/go-dockerclient.Image"".autotmp_0989type.string"".autotmp_0987¯type.string "".~r0ß6type.*encoding/json.Encoder$encoding/json.w·2¯type.io.Writer "".imgßXtype.github.com/fsouza/go-dockerclient.Image +"".idïtype.string"".nametype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerD%à¸ÿßàrßà"ßà$  XŒ %s!6y‘fQ#…:  61ƒ~ˆ=µF?" 
ÈTgclocals·d8f81ddf84701f3ac250364dd80cc8faTgclocals·dfb8fdcf4d5f80c6c0f13902dba3acbbü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).listEvents€æeH‹ %H„$(ÿÿÿH;AwèëâHìXH‹œ$pH‰$H‹œ$hH‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇ„$˜HÇ„$ HÇ„$¨HÇ$èH‹\$H‰\$H1ÀH‰D$@H‹l$HH9è½H‹œ$`H‰$èH‹L$HH‰D$xH‰$H‰Œ$€H‰L$èH‹\$H‰œ$°H‹\$H‰œ$¸H‹\$ H‰œ$ÀH‹D$(H‹\$0H‰\$pHƒøH‰D$ht+HÇD$ôH‹œ$pH‰$H‹œ$hH‹[0ÿÓHÄXÃH‹”$˜H‹Œ$ H‹œ$¨H‰ØH)ËHƒû}OHH‰$H‰”$H‰T$H‰Œ$H‰L$H‰„$ H‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$H‰„$ H‰ÓH‰”$H‰ÍHkíHëH‰$H‹œ$°H‰\$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹”$H‹Œ$H‹„$ H‰”$˜H‰Œ$ H‰„$¨H‹D$@HÿÀH‰D$@H‹l$HH9èŒCþÿÿHÇD$ÈH‹œ$pH‰$H‹œ$hH‹[0ÿÓH‹”$˜H‹„$ H‹œ$¨H‰œ$P1ÉH‰„$HH‰D$PH‰”$@H‰ÐH‹l$PH9é¯H‰D$`Hƒø„¯H‹H‹pH‹hH‰L$XH‰”$øH‰´$H‰¬$H‰”$ÈH‰”$(H‰´$ÐH‰´$0H‰¬$ØH‰¬$8Hœ$ˆHÇHÇCHœ$ˆHƒû„'HÇÂHÇÁH‰œ$àH‰”$èH‰Œ$ðHH‰$Hœ$(H‰\$èH‹D$H‹L$H‹œ$àH‰$H‰D$xH‰D$H‰Œ$€H‰L$èHH‰$H‹œ$hH‰\$H‹œ$pH‰\$èH\$H,$H‰ïH‰ÞH¥H¥H‹œ$àH‰\$H‹œ$èH‰\$H‹œ$ðH‰\$ èHÇ$ÈèH‹\$HiÛ@BH‰$èH‹D$`H‹L$XHƒÀHÿÁH‹l$PH9éŒQþÿÿHÄXÉéÒþÿÿ‰éJþÿÿ. 
+00runtime.morestack_noctxtz +–0go.string."Content-Type"¼8go.string."application/json"ä&net/http.Header.SetÆmath/rand.Intn¦@"".(*DockerServer).generateEvent¾btype.*github.com/fsouza/go-dockerclient.APIEventsô*encoding/json.MarshalÀ +¢type.[][]uint8”"runtime.growsliceè2runtime.writebarriersliceÈ + +Îtype.[]uint8úruntime.convT2EÞ2runtime.writebarrierifaceìtype.io.Writer²runtime.convI2I°fmt.FprintlnÊmath/rand.Intnôtime.Sleep@°0"".autotmp_1022"type.interface {}"".autotmp_1020ï&type.[]interface {}"".autotmp_1019¿type.[]uint8"".autotmp_1018ïtype.*[]uint8"".autotmp_1017type.int"".autotmp_1016type.int"".autotmp_1012type.int"".autotmp_1011type.[][]uint8"".autotmp_1010type.int"".autotmp_1009_type.[]uint8"".autotmp_1008Ÿ(type.[1]interface {}"".autotmp_1007type.[][]uint8"".autotmp_1006type.int"".autotmp_1005/type.[][]uint8"".autotmp_1003ÿtype.int"".dŸtype.[]uint8 "".errßtype.error"".dataÏtype.[]uint8"".i¯type.int"".countŸtype.int"".eventsÿtype.[][]uint8"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer(%°Ä¯°º¯°À T® %Q$n #þ #·"0=e¾ijq˜› \Tgclocals·7a383875e23784cb158d762414ce6278Tgclocals·c566b50610b1be520e5dd3f364f95be9ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ:"".(*DockerServer).pingDocker€xeH‹ %H;awèëêHƒìHÇD$ÈH‹\$(H‰$H‹\$ H‹[0ÿÓHƒÄà + 0runtime.morestack_noctxtj +@ "".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer !@Ô   +5 Tgclocals·ee0e5af169bfc1eef210605652a1df80Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).generateEventàÖeH‹ %H;awèëêHƒì`HÇD$(HÇD$0HÇ$èH‹D$HƒøYHƒø…(HH‹+H‰l$(H‹kH‰l$0H‹\$hH‰$èH‹\$H‰\$8H‹\$H‰\$@èH‹$‹L$H‹D$‰L$PH‰D$XH‰\$HH½ nˆñÿÿÿHëH‰\$HH‰$èH‹D$H‰D$ H‰$Hƒ<$„‡Hƒ$H‹\$8H‰\$H‹\$@H‰\$èH‹\$ H‰$Hƒ<$tPH‹\$(H‰\$H‹\$0H‰\$èH‹D$ Hƒøt(Hh 
HH‰ïH‰ÞH¥H¥H‹l$H‰h0H‰D$pHƒÄ`ÉëÔ‰%막%émÿÿÿHƒø…æþÿÿHH‹+H‰l$(H‹kH‰l$0éÉþÿÿHƒøuHH‹+H‰l$(H‹kH‰l$0é¦þÿÿHƒø…œþÿÿHH‹+H‰l$(H‹kH‰l$0éþÿÿ + 0runtime.morestack_noctxtjmath/rand.Intnª$go.string."create"è:"".(*DockerServer).generateIDštime.Now‚`type.github.com/fsouza/go-dockerclient.APIEvents”"runtime.newobject‚4runtime.writebarrierstringÔ4runtime.writebarrierstring€2go.string."mybase:latest"Ž"go.string."start"Ô go.string."stop"¢&go.string."destroy" À"".autotmp_1036btype.*github.com/fsouza/go-dockerclient.APIEvents"".autotmp_1032Otype.string "".~r0type.int64time.t·2/type.time.Time"".eventTypeotype.string "".~r0btype.*github.com/fsouza/go-dockerclient.APIEvents"".s*type.*"".DockerServerÀÆ¿ÀðNÜ  +"Ë + + +  + +4Xt)ÇTgclocals·7ba969af8c72fca351526f5bd553df36Tgclocals·76950f6d0769389d26192c168dbb78a0ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ8"".(*DockerServer).loadImage€xeH‹ %H;awèëêHƒìHÇD$ÈH‹\$(H‰$H‹\$ H‹[0ÿÓHƒÄà + 0runtime.morestack_noctxtj +@ "".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer !@„  +5 Tgclocals·ee0e5af169bfc1eef210605652a1df80Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ6"".(*DockerServer).getImage ŽeH‹ %H;awèëêHƒì(HÇD$ÈH‹\$@H‰$H‹\$8H‹[0ÿÓH‹\$@H‰$H‹\$8H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHƒÄ(à + 0runtime.morestack_noctxtj +’ +®0go.string."Content-Type"Ô6go.string."application/tar"ü&net/http.Header.Set@P"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServerPlO +ŒK +5[Tgclocals·ee0e5af169bfc1eef210605652a1df80Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþL"".(*DockerServer).createExecContainer€!ø eH‹ %H„$ ýÿÿH;AwèëâHì`H‹œ$€H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$H‰T$H‰¬$H‰l$èH‹\$ 
Hƒû„®H‹ H‹kH‹œ$hH‰$H‰Œ$ H‰L$H‰¬$¨H‰l$èH‹\$H‰\$HH‹D$(H‹\$0H‰œ$¸HƒøH‰„$°t]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$pH‰$H‹œ$xH‰\$H‰Œ$H‰L$H‰„$H‰D$HÇD$ ”èHÄ`ÃHH‰$èH‹\$H‰\$xH‹œ$hH‰$èH‹l$H‹T$H¼$h1ÀèH‰¬$H‰¬$hH‰”$H‰”$pH‹t$HHƒþ„{H¬$ØH‰ïèHH‰$H‹\$xH‰\$Hœ$hH‰\$èHH‰$èH‹\$H‰\$pHH‰$H‹¼$€Hƒÿ„Ho@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$H‰Œ$H‰„$H‰„$˜HH‰$èH‹L$H‰ÏHƒù„ž1ÀèH‰L$`H‰ $Hƒ<$„wH‹œ$H‰\$H‹œ$˜H‰\$èH‹L$`H‹D$pH‰ $H‰ÁHH‰„$ÀH‰D$H‰Œ$ÈH‰L$èH‹T$pH‹L$H‹D$ H‰„$¸HƒùH‰Œ$°t]H‰$H‹Y ÿÓH‹L$H‹D$H‹œ$pH‰$H‹œ$xH‰\$H‰Œ$H‰L$H‰„$H‰D$HÇD$ ôèHÄ`ÃH‹jHƒýŽŒH‹\$xH‰$Hƒ$(Hƒ$ H‹JH‹BH‹jH‰¬$0H‰Œ$ HƒøH‰„$(†DHl$H‰ïH‰ÎH¥H¥èH‹D$pH‹hHƒýŽH‹pH‹HHƒù‚ÿH‹\$xH‰$Hƒ$(Hƒ$0H‹@H‰ÊHÿÊH‰ñHÿÉHƒùtHƒÀH‰„$ H‰D$H‰”$(H‰T$H‰Œ$0H‰L$èH‹œ$hH‰$Hƒ<$„ƒHƒ$0èH‹\$xH‰\$XH‹œ$hHƒû„VH‹SH‹K H‹C(H‰”$PH‰Œ$XH‰„$`H‰ÃH)ËHƒû}OHH‰$H‰”$8H‰T$H‰Œ$@H‰L$H‰„$HH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÍHÿÅH‰¬$@H‰„$HH‰”$8HÊH‰$H‹\$XH‰\$èH‹”$8H‹Œ$@H‹„$HH‹œ$hH‰$Hƒ<$„gHƒ$H‰”$PH‰T$H‰Œ$XH‰L$H‰„$`H‰D$èH‹œ$hH‰$Hƒ<$„Hƒ$0èHÇD$ÈH‹œ$xH‰$H‹œ$pH‹[0ÿÓH‹œ$xH‰$H‹œ$pH‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHH‰$H‹œ$pH‰\$H‹œ$xH‰\$èH‹L$H‹D$ H‰Œ$ðH‰Œ$€H‰„$øH‰„$ˆHH‰$èH‹L$H‰ÏHƒù„1ÀèH‰L$PH‰ $Hƒ<$„ñH‹œ$€H‰\$H‹œ$ˆH‰\$èH‹\$PH‰\$@HH‰$HÇD$èH‹D$HH‹+H‰¬$àH‹kH‰¬$èH‹\$xH‹+H‰¬$ÐH‹kH‰¬$ØHH‰$H‰D$hH‰D$Hœ$àH‰\$Hœ$ÐH‰\$èH‹\$@H‰$H‹L$hHH‰„$ÀH‰D$H‰Œ$ÈH‰L$èHÄ`É%éÿÿÿ‰éáþÿÿ‰%éáýÿÿ‰%éýÿÿ‰é£üÿÿ‰%éqüÿÿè éNüÿÿè éBüÿÿ‰%é}úÿÿ‰é[úÿÿ‰éóùÿÿ‰é~ùÿÿ‰éKøÿÿp +00runtime.morestack_noctxtdŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars|go.string."id"˜,type.map[string]stringè4runtime.mapaccess1_faststrê@"".(*DockerServer).findContainerÖ +ènet/http.Error†dtype.github.com/fsouza/go-dockerclient.ExecInspect˜"runtime.newobjectÎ:"".(*DockerServer).generateID€„ runtime.duffzeroþ¤ runtime.duffcopyŒdtype.github.com/fsouza/go-dockerclient.ExecInspectÌ.runtime.writebarrierfatÚptype.github.com/fsouza/go-dockerclient.CreateExecOptionsì"runtime.newobjectŽtype.io.Readeräruntime.convI2IÆ 4type.encoding/json.DecoderØ "runtime.newobjectŠ +Ä runtime.duffzeroð +2runtime.writebarrieriface  rtype.*github.com/fsouza/go-dockerclient.CreateExecOptionsÞ 
>encoding/json.(*Decoder).DecodeÀ +Ò net/http.Error¨4runtime.writebarrierstring¤2runtime.writebarriersliceæ(sync.(*RWMutex).LockŒjtype.[]*github.com/fsouza/go-dockerclient.ExecInspectþ"runtime.growslice†.runtime.writebarrierptrÆ2runtime.writebarriersliceˆ,sync.(*RWMutex).UnlockÒ +† +¢0go.string."Content-Type"È8go.string."application/json"ð&net/http.Header.Setþtype.io.WriterÄruntime.convI2I¦4type.encoding/json.Encoder¸"runtime.newobjectêð runtime.duffzeroÐ2runtime.writebarrierifaceò,type.map[string]string–runtime.makemap®go.string."Id"¢,type.map[string]stringü$runtime.mapassign1¦,type.map[string]stringä>encoding/json.(*Encoder).Encodeú$runtime.panicslice’ $runtime.panicindex@À D"".autotmp_1068Ÿ +type.string"".autotmp_1067ÿ type.string"".autotmp_1066Ÿ 6type.*encoding/json.Encoder"".autotmp_10656type.*encoding/json.Encoder"".autotmp_1064ß type.io.Writer"".autotmp_1063type.uint64"".autotmp_1062type.uint64"".autotmp_1061type.int"".autotmp_1060type.int"".autotmp_1059Ïjtype.[]*github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1058Ÿjtype.[]*github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1057 ftype.*github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1054ÿ 6type.*encoding/json.Decoder"".autotmp_10536type.*encoding/json.Decoder"".autotmp_1052¿ type.io.Reader"".autotmp_1051,type.map[string]string"".autotmp_1049jtype.[]*github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1046type.string"".autotmp_1043ïdtype.github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1042type.string"".autotmp_1041type.string"".autotmp_1039Ÿ type.string"".autotmp_1038ï ,type.map[string]string"".&execÏ ftype.*github.com/fsouza/go-dockerclient.ExecInspect"".¶msß rtype.*github.com/fsouza/go-dockerclient.CreateExecOptions "".~r0¿ 6type.*encoding/json.Encoder$encoding/json.w·2¿ type.io.Writer$encoding/json.r·2Ÿ type.io.Reader "".errß +type.error"".container¯ btype.*github.com/fsouza/go-dockerclient.Container +"".idÿ 
+type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer8%À š¿ À ´¿ À È¿ À ‚Àš–%iGU +u†UZk!°!#Qú    +  `1š<:L72 ³ +~­D` ù L#s4Tgclocals·a484a676faa0084ad5f98b43c17e101cTgclocals·b3446cef6b648ddae3c01581a04ccc0bü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþJ"".(*DockerServer).startExecContainer€ ê eH‹ %H;awèëêHƒìxH‹œ$˜H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰T$hH‰T$H‰l$pH‰l$èH‹\$ Hƒû„¸H‹ H‹kH‹œ$€H‰$H‰L$8H‰L$H‰l$@H‰l$èH‹\$H‰\$0H‹D$ H‹\$(H‰\$PHƒøH‰D$H…<H‹œ$€H‰$Hƒ<$„Hƒ$0èH‹\$0HÇÅ@ˆkH‹œ$€H‰$Hƒ<$„ÜHƒ$0èH‹L$8H‹D$@HH‰$H‹œ$€H‹«H‰l$H‰L$hH‰L$H‰D$pH‰D$èH‹D$ ¶\$(H‹(€û„ÊH‹]H‰êÿÓH‹\$8H‰\$XH‹\$@H‰\$`HH‰$H‹œ$€H‹«H‰l$H\$XH‰\$èH‹œ$€H‰$Hƒ<$tcHƒ$0èH‹\$01í@ˆkH‹œ$€H‰$Hƒ<$t2Hƒ$0èHÇD$ÈH‹œ$H‰$H‹œ$ˆH‹[0ÿÓHƒÄxÉ%ëʼn%ë”HH‹ H‹CHH‰$H‹œ$€H‹«H‰l$H‰L$hH‰L$H‰D$pH‰D$èH‹D$ ¶\$(H‹(€û„%ÿÿÿH‹]H‰êÿÓHH‹+H‰l$XH‹kH‰l$`HH‰$H‹œ$€H‹«H‰l$H\$XH‰\$èéÑþÿÿ‰%éþÿÿ‰%éÛýÿÿHÇD$”H‹œ$H‰$H‹œ$ˆH‹[0ÿÓHƒÄxÉéAýÿÿ2 + 0runtime.morestack_noctxtNŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsfgo.string."id"‚,type.map[string]stringÆ4runtime.mapaccess1_faststr¼4"".(*DockerServer).getExecÎ(sync.(*RWMutex).Lock°,sync.(*RWMutex).UnlockÒ,type.map[string]func()´4runtime.mapaccess2_fastströ +¨,type.map[string]func()ö"runtime.mapdelete°(sync.(*RWMutex).Lock€,sync.(*RWMutex).UnlockÊ +‚ go.string."*"ž ,type.map[string]func()€ +4runtime.mapaccess2_faststr + +Ì +go.string."*"ü +,type.map[string]func()Ê "runtime.mapdeleteÎ +@ð"".autotmp_1089type.*func()"".autotmp_1087type.string"".autotmp_1086type.string"".autotmp_1085?type.string"".autotmp_1084type.string"".autotmp_1082type.string "".err_type.error"".execftype.*github.com/fsouza/go-dockerclient.ExecInspect +"".idtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer(ð‘ïðïðÀhÔcM!!X B + #  \ F  
#%.&wIÔ(€eCTgclocals·8375af20f91e3bf26f9f4b100ffb7d0eTgclocals·349a065d14b607627da67d5600b2511aü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþL"".(*DockerServer).resizeExecContainer€ìeH‹ %H;awèëêHƒì`H‹œ$€H‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰T$PH‰T$H‰l$XH‰l$èH‹\$ Hƒût}H‹ H‹kH‹\$hH‰$H‰L$0H‰L$H‰l$8H‰l$èH‹L$xH‹D$pH‹T$ H‹\$(H‰\$HHƒúH‰T$@uHÇD$ÈH‰ $H‹X0ÿÓHƒÄ`ÃHÇD$”H‰ $H‹X0ÿÓHƒÄ`Éé|ÿÿÿ + 0runtime.morestack_noctxtNŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsfgo.string."id"‚,type.map[string]stringÆ4runtime.mapaccess1_faststr®4"".(*DockerServer).getExec  +Ð +@À "".autotmp_1091type.string "".err?type.error +"".id_type.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer&À¼¿À¿À€$€_F  &ÚTgclocals·ee0e5af169bfc1eef210605652a1df80Tgclocals·f883d3996c76325fd1714d4e3de9fa33ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþN"".(*DockerServer).inspectExecContainer    eH‹ %HD$àH;AwèëåHì H‹œ$ÀH‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$H‰T$H‰¬$˜H‰l$èH‹\$ Hƒû„ÅH‹ H‹kH‹œ$¨H‰$H‰L$PH‰L$H‰l$XH‰l$èH‹Œ$¸H‹„$°H‹\$H‰\$0H‹T$ H‹\$(H‰\$hHƒúH‰T$`…FHÇD$ÈH‰ $H‹X0ÿÓH‹œ$¸H‰$H‹œ$°H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHH‰$H‹œ$°H‰\$H‹œ$¸H‰\$èH‹L$H‹D$ H‰Œ$€H‰L$@H‰„$ˆH‰D$HHH‰$èH‹L$H‰ÏHƒùtr1ÀèH‰L$8H‰ $Hƒ<$tRH‹\$@H‰\$H‹\$HH‰\$èH‹L$8H‹D$0H‰ $H‰ÁHH‰D$pH‰D$H‰L$xH‰L$èHĠÉ%륉ëŠHÇD$”H‰ $H‹X0ÿÓHĠÉé4þÿÿ* +*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ4"".(*DockerServer).getExecò +¦ +Â0go.string."Content-Type"è8go.string."application/json"&net/http.Header.Setžtype.io.Writeräruntime.convI2Iº4type.encoding/json.EncoderÌ"runtime.newobjectöð runtime.duffzeroÈ2runtime.writebarrierifaceøftype.*github.com/fsouza/go-dockerclient.ExecInspectª>encoding/json.(*Encoder).Encodeþ 
+@À"".autotmp_1100Ï6type.*encoding/json.Encoder"".autotmp_10996type.*encoding/json.Encoder"".autotmp_1098?type.io.Writer"".autotmp_1094type.string$encoding/json.w·2¿type.io.Writer "".errtype.error"".execßftype.*github.com/fsouza/go-dockerclient.ExecInspect +"".idŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer&"Àþ¿À'¿ÀÐ2’"i]QÍ .ˬ>1<Tgclocals·cec9627e2837f98af62e9c7580b3baccTgclocals·04f43ee17c64d5db43a23c286d1bf236ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ4"".(*DockerServer).getExecà Ä eH‹ %HD$ØH;AwèëåHì¨HÇ„$ÈHÇ„$ÐHÇ„$ØH‹œ$°H‰$Hƒ<$„yHƒ$0èH‹œ$°H‰$Hƒ<$„LHƒ$0H QjèL‹„$ÐYYH…À…H‹œ$°Hƒû„þH‹SH‹C H‹k(H‰¬$ 1öH‰„$˜H‰D$(H‰”$H‹l$(H9î¼H‰T$HH‹H‰t$0H‰\$8Hƒû„¢H‹;H‰¼$€H‹KH‰Œ$ˆL9ÁuoH‰<$H‰L$H‹¬$¸H‰l$L‰D$èL‹„$ÀH‹t$0H‹T$H¶\$ €ût3H‹\$8H‰œ$ÈHÇ„$ÐHÇ„$ØèHĨÃHƒÂHÿÆH‹l$(H9îŒDÿÿÿHH‹+H‰l$`H‹kH‰l$hHÇD$PHÇD$XHH‰$èH‹L$H‰L$@H‰ $Hƒ<$„£H‹\$`H‰\$H‹\$hH‰\$èH‹\$@H‰\$@H‹ 1íH9étCH‹T$@H‰L$pH‰T$xHÇ„$ÈH‰L$PH‰Œ$ÐH‰T$XH‰”$ØèHĨÃHH‰$HH‰\$HH‰\$èH‹L$뎉%éQÿÿÿ‰éWþÿÿ‰éûýÿÿèHĨÉ%é¨ýÿÿ‰%é{ýÿÿ$ +*0runtime.morestack_noctxtÆ*sync.(*RWMutex).RLockŒ4sync.(*RWMutex).RUnlock·fœ"runtime.deferprocˆ runtime.eqstring–&runtime.deferreturnÞ4go.string."exec not found"².type.errors.errorStringÄ"runtime.newobject¨4runtime.writebarrierstringÊBgo.itab.*errors.errorString.errorÎ &runtime.deferreturnì 0type.*errors.errorString‚ +type.errorš +Bgo.itab.*errors.errorString.error® + runtime.typ2Itabü +&runtime.deferreturn`Ð"".autotmp_1111otype.error"".autotmp_1110Ï0type.*errors.errorString"".autotmp_1109Otype.string"".autotmp_1107¿htype.**github.com/fsouza/go-dockerclient.ExecInspect"".autotmp_1106ÿtype.int"".autotmp_1105ïtype.int"".autotmp_11040type.*errors.errorString"".autotmp_1103/jtype.[]*github.com/fsouza/go-dockerclient.ExecInspect "".~r0¯type.errorerrors.text·2type.string"".execßftype.*github.com/fsouza/go-dockerclient.ExecInspect "".~r2@type.error 
"".~r10ftype.*github.com/fsouza/go-dockerclient.ExecInspect +"".idtype.string"".s*type.*"".DockerServerD"ÐiúÏÐÛÏÐVÏÐ&ð6¨F!>Xg3 +‚(báFX2R1&4Tgclocals·dbefa26e1f0ee62688488e90e23fcbd7Tgclocals·4459bbba29917b2fee408f0dbbff89b1ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ<"".(*DockerServer).findNetworkÀ¶eH‹ %HD$ÐH;AwèëåHì°HÇ„$ÐHÇ„$ØHÇ„$àHÇ„$èH‹œ$¸H‰$Hƒ<$„&H$°èH‹œ$¸H‰$Hƒ<$„öH$°H QjèH‹”$ØYYH…À…¾H‹œ$¸Hƒû„¥L‹ƒ˜H‹ƒ H‹«¨H‰¬$¨1öH‰„$ H‰D$0L‰„$˜H‹l$0H9îAL‰D$PI‹8H‰t$8H‰t$(H‰|$@Hƒÿ„;H‹OH‹GH9Ð…ŒH‰Œ$ˆH‰ $H‰„$H‰D$H‹¬$ÀH‰l$H‰T$èL‹D$PH‹|$@H‹t$8H‹”$ȶ\$ €ût;H‰¼$ÐH‹\$(H‰œ$ØHÇ„$àHÇ„$èèHÄ°ÃH‰ûHƒÿ„ŠH‹?H‰¼$ˆH‹KH‰Œ$H9ÑuEH‰<$H‰L$H‹¬$ÀH‰l$H‰T$èL‹D$PH‹|$@H‹t$8H‹”$ȶ\$ €û…WÿÿÿIƒÀHÿÆH‹l$0H9þÿÿHH‹+H‰l$hH‹kH‰l$pHÇD$XHÇD$`HH‰$èH‹L$H‰L$HH‰ $Hƒ<$„µH‹\$hH‰\$H‹\$pH‰\$èH‹\$HH‰\$HH‹ 1íH9étRH‹T$HH‰L$xH‰”$€HÇ„$ÐHÇ„$ØÿÿÿÿH‰L$XH‰Œ$àH‰T$`H‰”$èèHÄ°ÃHH‰$HH‰\$HH‰\$èH‹L$é|ÿÿÿ‰%é?ÿÿÿ‰éoþÿÿ‰é¾ýÿÿ‰éTýÿÿèHİÉ%éþüÿÿ‰%éÎüÿÿ& +*0runtime.morestack_noctxtä*sync.(*RWMutex).RLock°4sync.(*RWMutex).RUnlock·fÀ"runtime.deferprocÒ runtime.eqstringú&runtime.deferreturnœ runtime.eqstringž 6go.string."No such network"ò .type.errors.errorString„ +"runtime.newobjectè +4runtime.writebarrierstringŠ Bgo.itab.*errors.errorString.error¬ &runtime.deferreturnÊ 0type.*errors.errorStringà type.errorø Bgo.itab.*errors.errorString.errorŒ  runtime.typ2Itabî &runtime.deferreturnpà$"".autotmp_1123otype.error"".autotmp_1122Ï0type.*errors.errorString"".autotmp_1121type.string"".autotmp_1120Otype.string"".autotmp_1118¿`type.**github.com/fsouza/go-dockerclient.Network"".autotmp_1117ÿtype.int"".autotmp_1116ïtype.int"".autotmp_11150type.*errors.errorString"".autotmp_1114/btype.[]*github.com/fsouza/go-dockerclient.Network "".~r0¯type.errorerrors.text·2type.string"".networkß^type.*github.com/fsouza/go-dockerclient.Network"".itype.int "".~r3Ptype.error "".~r2@type.int 
"".~r10^type.*github.com/fsouza/go-dockerclient.Network"".idOrNametype.string"".s*type.*"".DockerServerD"à{šßàØßà`ßà :¾R$Afq;n +”,q÷SRt2a10+Tgclocals·8bd789dcce9d4daa4c4bb84dfe47e247Tgclocals·4459bbba29917b2fee408f0dbbff89b1ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ>"".(*DockerServer).listNetworksÀÀeH‹ %H„$PÿÿÿH;AwèëâHì01ÀH¼$èèH‹œ$8H‰$Hƒ<$„ÉH$°èH‹œ$8H‹« HH‰$HÇD$H‰l$èL‹T$L‹L$ L‹D$(L‰”$ˆL‰Œ$L‰„$˜H‹œ$8Hƒû„OH‹‹˜H‹ƒ H‹«¨H‰¬$È1ÒH‰„$ÀH‰D$@H‰Œ$¸H‰ÈH‹l$@H9ê*H‰D$`H‹0H‰T$HHƒþ„ìH¬$èH‰ïèL‰ÒL‰ÉL‰ÀL‰ÃL)ËHƒû}OHH‰$H‰”$ÐH‰T$H‰Œ$ØH‰L$H‰„$àH‰D$HÇD$ èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$ØH‰„$àHH‰$H‰ÓH‰”$ÐH‰ÍHkíHHëH‰\$Hœ$èH‰\$èL‹”$ÐL‹Œ$ØL‹„$àL‰”$ˆL‰ÉL‰Œ$L‰„$˜H‹D$`H‹T$HHƒÀHÿÂH‹l$@H9êŒÖþÿÿH‹œ$8H‰$Hƒ<$„¶H$°èH‹œ$HH‰$H‹œ$@H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$HH‰$H‹œ$@H‹[0ÿÓHH‰$H‹œ$@H‰\$H‹œ$HH‰\$èH‹L$H‹D$ H‰L$xH‰L$hH‰„$€H‰D$pHH‰$èH‹|$H‰ùHƒÿ„Á1ÀèH‰L$XH‰ $Hƒ<$„šH‹\$hH‰\$H‹\$pH‰\$èH‹\$XH‰\$PH‹œ$ˆH‰œ$ H‹œ$H‰œ$¨H‹œ$˜H‰œ$°HH‰$Hœ$ H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$PH‰$èHÄ0É%éZÿÿÿ‰é8ÿÿÿ‰%é>þÿÿ‰é ýÿÿ‰éªüÿÿ‰%é+üÿÿ4 +00runtime.morestack_noctxt`Ü runtime.duffzero¨*sync.(*RWMutex).RLockÔ`type.[]github.com/fsouza/go-dockerclient.Network‚"runtime.makesliceÌ„ runtime.duffcopy„`type.[]github.com/fsouza/go-dockerclient.Networkö"runtime.growsliceÎ\type.github.com/fsouza/go-dockerclient.Network®.runtime.writebarrierfatš .sync.(*RWMutex).RUnlockÒ +î 0go.string."Content-Type"” +8go.string."application/json"¼ +&net/http.Header.Set† + type.io.WriterÖ runtime.convI2I¦ 4type.encoding/json.Encoder¸ "runtime.newobjectê ð runtime.duffzeroÄ 
2runtime.writebarrierifaceÆ`type.[]github.com/fsouza/go-dockerclient.Networkòruntime.convT2E¶>encoding/json.(*Encoder).Encode@à$"".autotmp_1144¯6type.*encoding/json.Encoder"".autotmp_11436type.*encoding/json.Encoder"".autotmp_1142ïtype.io.Writer"".autotmp_1137`type.[]github.com/fsouza/go-dockerclient.Network"".autotmp_1136\type.github.com/fsouza/go-dockerclient.Network"".autotmp_1134Ÿ`type.**github.com/fsouza/go-dockerclient.Network"".autotmp_1133ßtype.int"".autotmp_1132type.int"".autotmp_1131Ÿ`type.[]github.com/fsouza/go-dockerclient.Network"".autotmp_1128ïbtype.[]*github.com/fsouza/go-dockerclient.Network"".autotmp_1127¿`type.[]github.com/fsouza/go-dockerclient.Network"".autotmp_1126Ïtype.int "".~r0¿6type.*encoding/json.Encoder$encoding/json.w·2type.io.Writer"".resultÏ`type.[]github.com/fsouza/go-dockerclient.Network"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer%àÁßà9 HÔ4$Tdþ$Q#š  *S§\vÏFW"FTgclocals·908986cc2bd23e6b2b43c6b331d27560Tgclocals·429e38e879552ec65b0c30795e04b14bü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ<"".(*DockerServer).networkInfoÀ +¢ +eH‹ %HD$ØH;AwèëåHì¨H‹œ$ÈH‰$èH‹D$HH‹H‹kHH‰$H‰D$H‰”$˜H‰T$H‰¬$ H‰l$èH‹\$ Hƒû„H‹ H‹kH‹œ$°H‰$H‰L$XH‰L$H‰l$`H‰l$èH‹\$H‰\$8H‹D$(H‹\$0H‰\$pHƒøH‰D$ht]H‰$H‹X ÿÓH‹L$H‹D$H‹œ$¸H‰$H‹œ$ÀH‰\$H‰Œ$˜H‰L$H‰„$ H‰D$HÇD$ ”èHĨÃH‹œ$ÀH‰$H‹œ$¸H‹[ ÿÓH‹t$H‰4$H5Hl$H‰ïH¥H¥HHl$H‰ïH‰ÞH¥H¥èHÇD$ÈH‹œ$ÀH‰$H‹œ$¸H‹[0ÿÓHH‰$H‹œ$¸H‰\$H‹œ$ÀH‰\$èH‹L$H‹D$ H‰Œ$ˆH‰L$HH‰„$H‰D$PHH‰$èH‹L$H‰ÏHƒùtu1ÀèH‰L$@H‰ $Hƒ<$tUH‹\$HH‰\$H‹\$PH‰\$èH‹L$@H‹D$8H‰ $H‰ÁHH‰D$xH‰D$H‰Œ$€H‰L$èHĨÉ%뢉뇉éóýÿÿ, +*0runtime.morestack_noctxt^Œgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsvgo.string."id"’,type.map[string]stringâ4runtime.mapaccess1_faststrØ<"".(*DockerServer).findNetwork¸ +Ênet/http.Error’ +®0go.string."Content-Type"Ô8go.string."application/json"ü&net/http.Header.SetÆ 
+Ðtype.io.Writer–runtime.convI2Iì4type.encoding/json.Encoderþ"runtime.newobject¨ð runtime.duffzeroú2runtime.writebarrierifaceª ^type.*github.com/fsouza/go-dockerclient.Networkâ >encoding/json.(*Encoder).Encode@Ð"".autotmp_1156Ï6type.*encoding/json.Encoder"".autotmp_11556type.*encoding/json.Encoder"".autotmp_1154?type.io.Writer"".autotmp_1151type.string"".autotmp_1149type.string$encoding/json.w·2¿type.io.Writer "".errtype.error"".networkß^type.*github.com/fsouza/go-dockerclient.Network +"".idŸtype.string"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer("ÐŽÏÐËÏÐ# 6ì"i> UQ#Ð .›µ>40Tgclocals·cec9627e2837f98af62e9c7580b3baccTgclocals·04f43ee17c64d5db43a23c286d1bf236ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ"".isValidNameàÒeH‹ %H;awèëêHƒì(H‹D$8Hƒøt:H‹t$0H‰4$H‰D$H5Hl$H‰ïH¥H¥è¶\$ €ûu +ÆD$@HƒÄ(ÃÆD$@HƒÄ(à + 0runtime.morestack_noctxtlgo.string."."Ž strings.Contains0P "".~r1 type.bool"".nametype.stringPDOP Op†6 + +F*Tgclocals·a08e9001cb8f9d822225de3b8e406515Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ@"".(*DockerServer).createNetwork ŒeH‹ %H„$èþÿÿH;AwèëâHì˜HH‰$èH‹\$H‰\$hH‹œ$¸Hƒû„îH‹S@H‹kHH‰¬$ØH‰,$H‰”$ÐHƒú„ÁHZ SjèYYH…À…œHH‰$H‹¼$¸Hƒÿ„xHo@H|$H‰îH¥H¥èH‹L$H‹D$ H‰Œ$ðH‰Œ$€H‰„$øH‰„$ˆHH‰$èH‹|$H‰ùHƒÿ„1ÀèH‰L$XH‰ $Hƒ<$„éH‹œ$€H‰\$H‹œ$ˆH‰\$èH‹T$XH‹L$hH‰$H‰ÊH H‰Œ$ÀH‰L$H‰”$ÈH‰T$èH‹L$H‹T$ H‰”$¨HƒùH‰Œ$ tcH‰$H‹Y ÿÓH‹T$H‹L$H‹œ$¨H‰$H‹œ$°H‰\$H‰”$H‰T$H‰Œ$H‰L$HÇD$ èèHĘÃH‹t$hH‹>Hƒÿ„÷H7H<$H¥H¥è¶\$€ûuHH‹œ$¨H‰$H‹´$°H‰t$H5Hl$H‰ïH¥H¥HÇD$ èèHĘÃH‹œ$ H‰$H‹t$hH‹>Hƒÿ„pH7H|$H¥H¥èH‹\$1íH9ëtHH‹œ$¨H‰$H‹´$°H‰t$H5Hl$H‰ïH¥H¥HÇD$ “èèHĘÃH‹œ$ H‰$èH‹L$H‹D$H‰Œ$H‰„$˜HH‰$èH‹T$hH‹L$H¼$P1ÀèH‹:Hƒÿ„£HH¼$PH‰ÞH¥H¥H‹œ$H‰œ$`H‹´$˜H‰´$hH‹:Hƒÿ„]HwH¼$pH¥H¥HH‰$H‰L$`H‰L$Hœ$PH‰\$èH‹œ$ H‰$Hƒ<$„H$°èH‹\$`H‰\$PH‹œ$ Hƒû„ÓH‹“˜H‹‹ H‹›¨H‰”$8H‰Œ$@H‰œ$HH‰ØH)ËHƒû}OHH‰$H‰”$ H‰T$H‰Œ$(H‰L$H‰„$0H‰D$HÇD$ 
èH‹T$(H‹L$0H‹D$8H‰ÎHÿÆH‰´$(H‰„$0H‰”$ HÊH‰$H‹\$PH‰\$èH‹”$ H‹Œ$(H‹„$0H‹œ$ H‰$Hƒ<$„ÛH$˜H‰”$8H‰T$H‰Œ$@H‰L$H‰„$HH‰D$èH‹œ$ H‰$Hƒ<$„„H$°èHÇD$ÉH‹œ$°H‰$H‹œ$¨H‹[0ÿÓHœ$°HÇHÇCH‹|$`HoH¼$°H‰îH¥H¥HH‰$H‹œ$¨H‰\$H‹œ$°H‰\$èH‹L$H‹D$ H‰Œ$àH‰L$pH‰„$èH‰D$xHH‰$èH‹|$H‰ùHƒÿ„®1ÀèH‰L$HH‰ $Hƒ<$„‡H‹\$pH‰\$H‹\$xH‰\$èH‹t$HH‰t$@H´$°H¬$H‰ïH¥H¥HH‰$Hœ$H‰\$èH\$Hl$H‰ïH‰ÞH¥H¥H‹\$@H‰$èèHĘÉ%émÿÿÿ‰éKÿÿÿ‰%épþÿÿ‰%éþÿÿ‰é&ýÿÿ‰%éñüÿÿ‰éœüÿÿ‰éVüÿÿ‰é‰ûÿÿ‰éûÿÿ‰%é úÿÿ‰ééùÿÿ‰éùÿÿèHĘÉé8ùÿÿ‰é ùÿÿ` +00runtime.morestack_noctxtPxtype.*github.com/fsouza/go-dockerclient.CreateNetworkOptionsb"runtime.newobjectþ"runtime.deferproc¢type.io.Readerøruntime.convI2IÚ4type.encoding/json.Decoderì"runtime.newobjectžÄ runtime.duffzero„2runtime.writebarrieriface´ztype.**github.com/fsouza/go-dockerclient.CreateNetworkOptionsò>encoding/json.(*Decoder).DecodeÊ +Ünet/http.Errorè&runtime.deferreturn¼"".isValidName @go.string."Invalid network name"Ä net/http.ErrorÐ &runtime.deferreturn¾ +<"".(*DockerServer).findNetwork– Dgo.string."network already exists"Ê net/http.ErrorÖ &runtime.deferreturnˆ :"".(*DockerServer).generateIDÊ \type.github.com/fsouza/go-dockerclient.NetworkÜ "runtime.newobjectŽ Ü runtime.duffzeroÔ\type.github.com/fsouza/go-dockerclient.Network”.runtime.writebarrierfatÜ(sync.(*RWMutex).Lock”btype.[]*github.com/fsouza/go-dockerclient.Network†"runtime.growsliceŽ.runtime.writebarrierptrÔ2runtime.writebarriersliceœ,sync.(*RWMutex).Unlockæ +Îtype.io.Writer”runtime.convI2Iê4type.encoding/json.Encoderü"runtime.newobject®ð runtime.duffzeroˆ2runtime.writebarrierifaceØ2type.struct { ID string 
}„runtime.convT2EÈ>encoding/json.(*Encoder).EncodeÔ&runtime.deferreturnØ&runtime.deferreturn@°2"".autotmp_1180Ÿ6type.*encoding/json.Encoder"".autotmp_11796type.*encoding/json.Encoder"".autotmp_1178ïtype.io.Writer"".autotmp_1173ïbtype.[]*github.com/fsouza/go-dockerclient.Network"".autotmp_1172¿btype.[]*github.com/fsouza/go-dockerclient.Network"".autotmp_1171^type.*github.com/fsouza/go-dockerclient.Network"".autotmp_1170ÿ6type.*encoding/json.Decoder"".autotmp_11696type.*encoding/json.Decoder"".autotmp_1168Ïtype.io.Reader"".autotmp_1167¯2type.struct { ID string }"".autotmp_1166btype.[]*github.com/fsouza/go-dockerclient.Network"".autotmp_1165\type.github.com/fsouza/go-dockerclient.Network"".autotmp_1164type.string"".autotmp_1162type.string"".&networkï^type.*github.com/fsouza/go-dockerclient.Network"".&configßztype.**github.com/fsouza/go-dockerclient.CreateNetworkOptions "".~r0¯6type.*encoding/json.Encoder$encoding/json.w·2Ïtype.io.Writer$encoding/json.r·2¯type.io.Reader"".cÏ2type.struct { ID string }"".generatedIDtype.string "".errïtype.error"".r0,type.*net/http.Request"".w8type.net/http.ResponseWriter"".s*type.*"".DockerServer`%°Wú¯°s¯°‚¯°¾¯°¯°ÐŒ”%OU,:;:+ +’ +$¼$#/„    &j0NwL7-xD=F*œ$•Dc 4F>"¨Tgclocals·a484a676faa0084ad5f98b43c17e101cTgclocals·770edcce2fa22bec4aa18365adf4562eü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþR"".*DockerServer.("".commitContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚D"".(*DockerServer).commitContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÈP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþP"".*DockerServer.("".listContainers)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + 
"runtime.morestack‚B"".(*DockerServer).listContainers0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÊP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþR"".*DockerServer.("".createContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚D"".(*DockerServer).createContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÌP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþT"".*DockerServer.("".inspectContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚F"".(*DockerServer).inspectContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÎP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþR"".*DockerServer.("".renameContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚D"".(*DockerServer).renameContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÐP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþL"".*DockerServer.("".topContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚>"".(*DockerServer).topContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÒP 
+@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþP"".*DockerServer.("".startContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚B"".(*DockerServer).startContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÔP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþN"".*DockerServer.("".stopContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚@"".(*DockerServer).stopContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÖP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþP"".*DockerServer.("".pauseContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚B"".(*DockerServer).pauseContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÚP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþT"".*DockerServer.("".unpauseContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚F"".(*DockerServer).unpauseContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÜP 
+@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþN"".*DockerServer.("".waitContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚@"".(*DockerServer).waitContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PÞP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþR"".*DockerServer.("".attachContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚D"".(*DockerServer).attachContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PàP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþR"".*DockerServer.("".removeContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚D"".(*DockerServer).removeContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PâP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþZ"".*DockerServer.("".createExecContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚L"".(*DockerServer).createExecContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PäP 
+@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþP"".*DockerServer.("".statsContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚B"".(*DockerServer).statsContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PæP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþZ"".*DockerServer.("".resizeExecContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚L"".(*DockerServer).resizeExecContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PèP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþX"".*DockerServer.("".startExecContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚J"".(*DockerServer).startExecContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PêP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ\"".*DockerServer.("".inspectExecContainer)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚N"".(*DockerServer).inspectExecContainer0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PìP 
+@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþF"".*DockerServer.("".pullImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚8"".(*DockerServer).pullImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PîP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþH"".*DockerServer.("".buildImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚:"".(*DockerServer).buildImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PðP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþH"".*DockerServer.("".listImages)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚:"".(*DockerServer).listImages0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PòP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþJ"".*DockerServer.("".removeImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚<"".(*DockerServer).removeImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PôP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþL"".*DockerServer.("".inspectImage)·fm ”eH‹ 
%H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚>"".(*DockerServer).inspectImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PöP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþF"".*DockerServer.("".pushImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚8"".(*DockerServer).pushImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PøP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".*DockerServer.("".tagImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚6"".(*DockerServer).tagImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PúP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþH"".*DockerServer.("".listEvents)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚:"".(*DockerServer).listEvents0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PüP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþH"".*DockerServer.("".pingDocker)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚:"".(*DockerServer).pingDocker0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PþP 
+@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþF"".*DockerServer.("".loadImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚8"".(*DockerServer).loadImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?P€P +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþD"".*DockerServer.("".getImage)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚6"".(*DockerServer).getImage0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?P‚P +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþL"".*DockerServer.("".listNetworks)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚>"".(*DockerServer).listNetworks0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?P„P +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþJ"".*DockerServer.("".networkInfo)·fm ”eH‹ %H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚<"".(*DockerServer).networkInfo0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?P†P +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþN"".*DockerServer.("".createNetwork)·fm ”eH‹ 
%H;awèëêHƒì H‹ZH‰$H‹\$(H‰\$H‹\$0H‰\$H‹\$8H‰\$èHƒÄ à + "runtime.morestack‚@"".(*DockerServer).createNetwork0@ +"".a1 ,type.*net/http.Request +"".a08type.net/http.ResponseWriter@/?PˆP +@Tgclocals·099986b79bd4df464b634a14757f9178Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ"".func·001€úeH‹ %H„$øþÿÿH;AwèëâHìˆH‹BH‹ZH‰\$pH‰D$hH‹H‹«èH¼$81ÀèHH‰$H‰l$Hœ$8H‰\$èH‹œ$81íH9ë„AH‹œ$@Hƒû„H‹ H‹CH‹œ$8Hƒû„ÿH‹+H‰¬$ˆH‹kH‰¬$H‰Œ$¸H‰„$ÀH‰L$xH‰ $H‰„$€H‰D$H‹´$ H‹~Hƒÿ„¥Hw8H|$H¥H¥è¶\$ H‰ØH‹L$(H‹\$0H‰œ$°HƒùH‰Œ$¨t]H‰$H‹Y ÿÓH‹L$H‹D$H‹œ$H‰$H‹œ$˜H‰\$H‰Œ$¸H‰L$H‰„$ÀH‰D$HÇD$ èHĈÃ<…½Hœ$8H‰$èH‹œ$81íH9ë…¿þÿÿH‹\$hH‹+Hƒý„H‹•ðH‹øH‹H‰œ$01ÀH‰Œ$(H‰L$HH‰”$ H‰ÑH‹l$HH9èVH‰L$`H‹)H‰D$PH‰D$@H‰èHH‹H‹kHH‰$H‰D$XH‰D$H‰”$¸H‰T$H‰¬$ÀH‰l$èH‹\$ Hƒû„ÍH‹ H‹kH‰Œ$ÈH‰ $H‰¬$ÐH‰l$H‹´$ H‹~Hƒÿ„Hw8H|$H¥H¥è¶\$ H‰ØH‹L$(H‹\$0H‰œ$ HƒùH‰Œ$˜t]H‰$H‹Y ÿÓH‹L$H‹D$H‹œ$H‰$H‹œ$˜H‰\$H‰Œ$¸H‰L$H‰„$ÀH‰D$HÇD$ èHĈÃ<uZH‹L$`H‹D$PHƒÁHÿÀH‹l$HH9茪þÿÿH‹œ$H‰$H‹œ$˜H‰\$H‹œ$ H‰\$H‹\$pH‹H‹ÿÓHĈÃHH‹ H‹kHH‰$H‹\$XH‰\$H‰Œ$¸H‰L$H‰¬$ÀH‰l$èH‹\$ Hƒû„CH‹ H‹kH‹œ$H‰$H‹œ$˜H‰\$H‰Œ$ÈH‰L$H‰¬$ÐH‰l$HÇD$ èH‹|$@H‹L$hH‹)H‹•H9ú‚×H‹L‹“ðI‰ùI‰ÐH‰øHÿÀH‹)H‹½H‹)H‹•øH9‚H‹H‰ÆH‹‹ðH)ÂH‰ûH)ÃH‰ØHƒût H‰óHÁãHËH‰ÙH‰Œ$ðH‰×H‰„$L‰”$L‰ÖL‰Œ$L‰„$L‰ÈL‰Œ$àH‰”$øHÐL‰„$èL)ÀHƒø~[HH‰$H‰´$ØH‰t$L‰L$L‰D$H‰D$ èL‹Œ$H‹¼$øH‹t$(H‹\$0H‰œ$àH‹\$8H‰œ$èH‰´$ØJ,ÎH‰,$H‹œ$ðH‰\$H‰ûHÁãH‰\$èH‹”$H‹¬$øH‹„$èH‹´$ØHêH‰´$ØH‰”$àH‰„$èH‹\$hH‹+H‰,$Hƒ<$tm£5iÁŒð +Ô]‚ }%Tgclocals·9672a07f1a450fc594d7cd9cb2c95495Tgclocals·0809678294a6ccf1679e4ac422a0f629ü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ"".init  eH‹ %H;awèëêHƒì¶€ût¶€ûuHƒÄÃè ÆèèèèèèèèèèèèèèèHH,$H‰ïH‰ÞH¥H¥èH‹D$HH‰$H‰D$èÆHƒÄÃ4 + 0runtime.morestack_noctxt:"".initdone·R"".initdone·p"runtime.throwinit€"".initdone·ŒŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.init–¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.init 
Lgithub.com/fsouza/go-dockerclient.initªtime.init´sync.init¾strings.initÈstrconv.initÒregexp.initÜnet/http.initænet.initðmath/rand.initúfmt.init„$encoding/json.initŽ crypto/rand.init˜ archive/tar.init¦Pgo.string."^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"Ì$regexp.MustCompileä"".nameRegexp€.runtime.writebarrierptrŒ"".initdone·00/0˜/ÐΑ4’  7™Tgclocals·3280bececceccd33cb74587feedb1f9fTgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ(type..hash.[8]stringàÎeH‹ %H;awèëêHƒì0H‹L$H1ÀHÇD$ H‹l$ H9è}eH‰D$(H‰ÅH‰ÈHÁÀH»¿c»kïRH¯ØH‰ØH‹\$8HƒûtDHkíHëH‰$HÇD$H‰D$HH‰D$èH‹L$H‹D$(HÿÀH‹l$ H9è|›H‰L$PHƒÄ0Éë¸ + 0runtime.morestack_noctxtüruntime.strhash@` "".autotmp_1229type.int"".autotmp_1228type.int "".~r30type.uintptr"".h type.uintptr"".stype.uintptr"".ptype.*[8]string`ˆ_` °° +}3Tgclocals·2dc77d960dd3e4b3de2361f9cbd75783Tgclocals·3280bececceccd33cb74587feedb1f9fü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþ$type..eq.[8]stringàÈeH‹ %H;awèëêHƒìX1ÀHÇD$(H‹l$(H9è’H‰D$0H‹\$`Hƒû„–H‰ÅHkíHëH‹3H‹KH‹\$hHƒûtvH‰ÅHkíHëH‹H‹CH9ÁuVH‰t$HH‰4$H‰L$PH‰L$H‰T$8H‰T$H‰D$@H‰D$è¶\$ €ût H‹D$0HÿÀH‹l$(H9èŒnÿÿÿÆD$xHƒÄXÃÆD$xHƒÄXÉ놉écÿÿÿ + 0runtime.morestack_noctxt runtime.eqstring@°"".autotmp_1233?type.string"".autotmp_1232type.string"".autotmp_1231_type.int"".autotmp_1230Otype.int "".~r30type.bool"".s type.uintptr"".qtype.*[8]string"".ptype.*[8]string&°´¯° ¯°ðð  PTgclocals·9c703c5c7b9c1932c840b69f8ebce236Tgclocals·44568aa369055d8938d809aa5d80843bü/Users/csparr/ws/go/src/github.com/influxdb/telegraf/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.goþgo.string."/containers/{id:.*}"PH/containers/{id:.*} >go.string."/containers/{id:.*}"þ$go.string."DELETE"0.DELETE $go.string."DELETE"þHgo.string."/containers/{id:.*}/exec"`R/containers/{id:.*}/exec 
Hgo.string."/containers/{id:.*}/exec"þJgo.string."/containers/{id:.*}/stats"`T/containers/{id:.*}/stats Jgo.string."/containers/{id:.*}/stats"þ@go.string."/exec/{id:.*}/resize"PJ/exec/{id:.*}/resize @go.string."/exec/{id:.*}/resize"þ>go.string."/exec/{id:.*}/start"PH/exec/{id:.*}/start >go.string."/exec/{id:.*}/start"þ/images/create 4go.string."/images/create"þ$go.string."/build"0./build $go.string."/build"þ0go.string."/images/json"@: /images/json 0go.string."/images/json"þ6go.string."/images/{id:.*}"@@/images/{id:.*} 6go.string."/images/{id:.*}"þDgo.string."/images/{name:.*}/json"PN/images/{name:.*}/json Dgo.string."/images/{name:.*}/json"þDgo.string."/images/{name:.*}/push"PN/images/{name:.*}/push Dgo.string."/images/{name:.*}/push"þBgo.string."/images/{name:.*}/tag"PL/images/{name:.*}/tag Bgo.string."/images/{name:.*}/tag"þ&go.string."/events"00/events &go.string."/events"þ$go.string."/_ping"0./_ping $go.string."/_ping"þ0go.string."/images/load"@: /images/load 0go.string."/images/load"þ>go.string."/images/{id:.*}/get"PH/images/{id:.*}/get >go.string."/images/{id:.*}/get"þ*go.string."/networks"@4 /networks *go.string."/networks"þ:go.string."/networks/{id:.*}"PD/networks/{id:.*} :go.string."/networks/{id:.*}"þTgclocals·76225bbef6ae6e9e5960f6f7925b8185@@€ "þTgclocals·2c09ec81c5cb12328d7183f25bc48833@@þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·e8c55b930b09fa5028b5e4b78b8932dc +þTgclocals·31214a5fe2ac06a8b2e85038c37289d6  +þTgclocals·5197b04b6fafdc0c7d1822cc34066683 ŠŠþTgclocals·31214a5fe2ac06a8b2e85038c37289d6  +þTgclocals·5197b04b6fafdc0c7d1822cc34066683 ŠŠþTgclocals·f29b89ce4cd57d8100665fbda8fdf405 "þTgclocals·1765c43755fbf91dfae87195c1ec24fb  +ŠŠþ"go.string."error"0,error "go.string."error"þgo.string."url"0(url go.string."url"þTgclocals·20f42599a700c1a4b6c6ede24ef4e8a600ŠþTgclocals·9a90374975a8610a14ef231e086acf2900 +ŠŠŠŠþTgclocals·8d600a433c6aaa81a4fe446d95c5546b þTgclocals·bd51743682bd6c0f7b9f2e8e6dffed99  + 
+þTgclocals·0528ab8f76149a707fd2f0025c2178a3þTgclocals·519efd86263089ddb84df3cfe7fd2992þTgclocals·85223f890d4c8f80203775beed82eadd +þTgclocals·925be0824eaf197a56a5d7050bf29309  +ÊÊþ,Bgo.itab.*errors.errorString.errorþ>go.string."container not found"PHcontainer not found >go.string."container not found"þTgclocals·0e8ff9f111235a6bccca3fa33f624774``."•”%þTgclocals·1e2d550ac4f017d716d87ff44946577f88 J%e J%e J%e J%e J%e þTgclocals·d64e51a4c4bfeaa840e480961ec6b0b3þTgclocals·519efd86263089ddb84df3cfe7fd2992þ&go.string."http://"00http:// &go.string."http://"þgo.string."/"0$/ go.string."/"þTgclocals·f883d3996c76325fd1714d4e3de9fa33 þTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·3901c619f635162fa423fe138099ace5((  ªU° ªUþTgclocals·bc335ce91c3a8b5f426dd201465802bd((®®®þ,Ägo.itab.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router.net/http.HandlerþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þTgclocals·be18fcff1e4d1cf801d0b47f660b980688("þTgclocals·ab01a2d55089ff50c402006df1039c3988 + + + + +þgo.string."all"0(all go.string."all"þgo.string."1"0$1 go.string."1"þgo.string." "0$  go.string." 
"þ"go.string."%s %s"0,%s %s "go.string."%s %s"þgo.string."/%s"0(/%s go.string."/%s"þ0go.string."Content-Type"@: Content-Type 0go.string."Content-Type"þ8go.string."application/json"PBapplication/json 8go.string."application/json"þTgclocals·a74ca190396b92ed76efe93c61653942ˆˆ”ˆH"”ˆH"”ˆˆH"”ˆ ü‹H"”ˆü‹H"”ˆ"ü‹H"”ˆ "ü‹H"”ˆ/" ü‹H"”ˆ/"ü‹H"”€ ü‹H"”€ü‹H"”€ü‹H"”€ ü‹H"”ˆH"”°ˆH"” ˆH"”ˆH"”ˆH"”þTgclocals·02bfe185cbfa386cc6696a665007ff28¨¨®®®®®®®®®®®®®®®®®®®þTgclocals·e0b091cc964057ade987c1196ae02e2e vˆP‰ ˆP‰ ˆ”""•$˜ ¢ˆ ‚"J €ªV€ˆ”""•$˜ ¢ˆ ‚"J €ªV€ˆ”""•$˜ ¢ˆ ‚"J€ªV ‚€ªV€ˆ”""•$˜ ¢ˆ ‚"JÀþTgclocals·f7ba1512b6938de3ab7810c798567682pp ®®®®®®®®®®®®þTgclocals·660c52760819425e2fa6ae9a8a8ae931  þTgclocals·f7cb58e18cf0f9d3ee7dc7385e94aef7  +Š,þ2go.string."No such image"@< No such image 2go.string."No such image"þTgclocals·52c6e5e411ef106b9194437a527f5a0f  "ˆH)"RI‚ ‚ Šˆ (¢þTgclocals·c958acb0df1ea67178a15bee7623bbbd88 +Š´ + + +þ go.string."name"0*name go.string."name"þDgo.string."Invalid container name"PNInvalid container name Dgo.string."Invalid container name"þ&go.string."0.0.0.0"000.0.0.0 &go.string."0.0.0.0"þ0go.string."172.16.42.%d"@: 172.16.42.%d 0go.string."172.16.42.%d"þ.go.string."172.16.42.1"@8 172.16.42.1 .go.string."172.16.42.1"þ&go.string."docker0"00docker0 &go.string."docker0"þlgo.string."there's already a container with this name"€v*there's already a container with this name lgo.string."there's already a container with this name"þTgclocals·37832594314d999d6d2b5ee19691d36f€€,€ €ªZ €ªZ €ˆªZ € ªZ ˆ ˆ€ €ˆ€ €ˆ€@  €ˆ€e  €ˆ€À e  €ˆ€Àe  €Àƒ ¥VR–¢"""¢ + ‚À ¥VR–¢"""¢ + ‚À ¥VR–¢"""¢ + €À ¥VR–¢"""¢ + €€€ €€€ ÀþTgclocals·d1e6514bc516778716e9d38209cf4ab8€€®®®®®®®®®®®®®®®®®®®®®®®®®®®®®®þgo.string."%x"0&%x go.string."%x"þTgclocals·80320eec1018401d2b0daec3b250b99e00ÀÀþTgclocals·1ee14e32cec51f1cde6c2b0577d8188700þgo.string."id"0&id go.string."id"þTgclocals·5699c890da9a4c1a61d89d978591d077@@(þTgclocals·bc335ce91c3a8b5f426dd201465802bd((®®®þTgclocals·04f43ee17c64d5db43a23c286d1bf23600² 
+þTgclocals·cec9627e2837f98af62e9c7580b3bacc00®®®®þ$go.string."stream"0.stream $go.string."stream"þTgclocals·bdc1cfaf863af97c7b8d007001384e8a€€8,( + +eUUUUUUUUUU‚ ‚ BU%TUþTgclocals·7a383875e23784cb158d762414ce6278HH®®®®®®®þNgo.string."Container %s is not running"`XContainer %s is not running Ngo.string."Container %s is not running"þgo.string."UID"0(UID go.string."UID"þgo.string."PID"0(PID go.string."PID"þ go.string."PPID"0*PPID go.string."PPID"þgo.string."C"0$C go.string."C"þ"go.string."STIME"0,STIME "go.string."STIME"þgo.string."TTY"0(TTY go.string."TTY"þ go.string."TIME"0*TIME go.string."TIME"þgo.string."CMD"0(CMD go.string."CMD"þ go.string."root"0*root go.string."root"þ go.string."7535"0*7535 go.string."7535"þ go.string."7516"0*7516 go.string."7516"þgo.string."0"0$0 go.string."0"þ"go.string."03:20"0,03:20 "go.string."03:20"þgo.string."?"0$? go.string."?"þ(go.string."00:00:00"@200:00:00 (go.string."00:00:00"þTgclocals·950db48493931155c4d72d2be7776567ààPÈÈ‚ˆ‚€ ‚€‚‚À‚ ‚  þTgclocals·a484a676faa0084ad5f98b43c17e101c€€®®®®®®®®®®®®®®þJgo.string."Container already running"`TContainer already running Jgo.string."Container already running"þTgclocals·9cd0f1c7734d56b3c926d71ae19f8ec3pp$‚‚ Š þTgclocals·f3828558443ce662a87feff12c09632b@@®®®®®®þBgo.string."Container not running"PLContainer not running Bgo.string."Container not running"þTgclocals·97d2741936c7bda613787afceb8adff3((þTgclocals·bc335ce91c3a8b5f426dd201465802bd((®®®þHgo.string."Container already paused"`RContainer already paused Hgo.string."Container already paused"þTgclocals·0a4b95df80c389fe7e338059324575e1 þTgclocals·0b0af158856f2ab75a5e0667d877f9eb ®®þ@go.string."Container not paused"PJContainer not paused @go.string."Container not paused"þTgclocals·0a4b95df80c389fe7e338059324575e1 þTgclocals·0b0af158856f2ab75a5e0667d877f9eb ®®þ,Øgo.itab.*github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.StdWriter.io.WriterþHgo.string."cannot hijack connection"`Rcannot hijack connection 
Hgo.string."cannot hijack connection"þZgo.string."application/vnd.docker.raw-stream"pd!application/vnd.docker.raw-stream Zgo.string."application/vnd.docker.raw-stream"þJgo.string."Container %q is running\n"`RContainer %q is running + Jgo.string."Container %q is running\n"þRgo.string."Container %q is not running\n"`ZContainer %q is not running + Rgo.string."Container %q is not running\n"þ4go.string."What happened?"@>What happened? 4go.string."What happened?"þ€ <<þTgclocals·7a383875e23784cb158d762414ce6278HH®®®®®®®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·ee0e5af169bfc1eef210605652a1df80®þ$go.string."create"0.create $go.string."create"þ"go.string."start"0,start "go.string."start"þ go.string."stop"0*stop go.string."stop"þ&go.string."destroy"00destroy &go.string."destroy"þ2go.string."mybase:latest"@< mybase:latest 2go.string."mybase:latest"þTgclocals·76950f6d0769389d26192c168dbb78a088ˆ +þTgclocals·7ba969af8c72fca351526f5bd553df3688þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·ee0e5af169bfc1eef210605652a1df80®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·ee0e5af169bfc1eef210605652a1df80®þgo.string."Id"0&Id go.string."Id"þTgclocals·b3446cef6b648ddae3c01581a04ccc0b  È€€H•$‚”"XIYŠŠˆˆˆ*$€  °¢€€€€€ €€ þTgclocals·a484a676faa0084ad5f98b43c17e101c€€®®®®®®®®®®®®®®þgo.string."*"0$* go.string."*"þTgclocals·349a065d14b607627da67d5600b2511a88 +þTgclocals·8375af20f91e3bf26f9f4b100ffb7d0e88®®®®®þTgclocals·f883d3996c76325fd1714d4e3de9fa33 þTgclocals·ee0e5af169bfc1eef210605652a1df80®þTgclocals·04f43ee17c64d5db43a23c286d1bf23600² +þTgclocals·cec9627e2837f98af62e9c7580b3bacc00®®®®þ4go.string."exec not found"@>exec not found 4go.string."exec not found"þTgclocals·4459bbba29917b2fee408f0dbbff89b188"þTgclocals·dbefa26e1f0ee62688488e90e23fcbd788 +Š + + +þ6go.string."No such network"@@No such network 6go.string."No such network"þTgclocals·4459bbba29917b2fee408f0dbbff89b188"þTgclocals·8bd789dcce9d4daa4c4bb84dfe47e24788 +Š- + + 
+þTgclocals·429e38e879552ec65b0c30795e04b14b   8€ˆ €ˆ ‚ˆ€€ˆÀ‚€ˆ€€ˆ €ˆ €ˆþTgclocals·908986cc2bd23e6b2b43c6b331d27560XX ®®®®®®®®®þTgclocals·04f43ee17c64d5db43a23c286d1bf23600² +þTgclocals·cec9627e2837f98af62e9c7580b3bacc00®®®®þgo.string."."0$. go.string."."þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·a08e9001cb8f9d822225de3b8e406515þ@go.string."Invalid network name"PJInvalid network name @go.string."Invalid network name"þDgo.string."network already exists"PNnetwork already exists Dgo.string."network already exists"þTgclocals·770edcce2fa22bec4aa18365adf4562eààV €  "  °  þTgclocals·a484a676faa0084ad5f98b43c17e101c€€®®®®®®®®®®®®®®þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals
·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·099986b79bd4df464b634a14757f9178+þTgclocals·0809678294a6ccf1679e4ac422a0f629ÐÐL ªZ  ªZªZª € ‚ þTgclocals·9672a07f1a450fc594d7cd9cb2c95495PP++++++++þPgo.string."^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"`Z^[a-zA-Z0-9][a-zA-Z0-9_.-]+$ Pgo.string."^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·3280bececceccd33cb74587feedb1f9fþ*"".nameRegexp&type.*regexp.Regexpþ""".statictmp_0019€(type."".DockerServerþ""".statictmp_0126 type.[1]string  go.string."POST"þ""".statictmp_0130 type.[1]string  go.string."GET"þ""".statictmp_0134 type.[1]string  go.string."POST"þ""".statictmp_0138 
type.[1]string  go.string."GET"þ""".statictmp_0142 type.[1]string  go.string."POST"þ""".statictmp_0146 type.[1]string  go.string."GET"þ""".statictmp_0150 type.[1]string  go.string."POST"þ""".statictmp_0154 type.[1]string  go.string."POST"þ""".statictmp_0158 type.[1]string  go.string."POST"þ""".statictmp_0162 type.[1]string  go.string."POST"þ""".statictmp_0166 type.[1]string  go.string."POST"þ""".statictmp_0170 type.[1]string  go.string."POST"þ""".statictmp_0174 type.[1]string  go.string."POST"þ""".statictmp_0178 type.[1]string  $go.string."DELETE"þ""".statictmp_0182 type.[1]string  go.string."POST"þ""".statictmp_0186 type.[1]string  go.string."GET"þ""".statictmp_0190 type.[1]string  go.string."POST"þ""".statictmp_0194 type.[1]string  go.string."POST"þ""".statictmp_0198 type.[1]string  go.string."GET"þ""".statictmp_0202 type.[1]string  go.string."POST"þ""".statictmp_0206 type.[1]string  go.string."POST"þ""".statictmp_0210 type.[1]string  go.string."GET"þ""".statictmp_0214 type.[1]string  $go.string."DELETE"þ""".statictmp_0218 type.[1]string  go.string."GET"þ""".statictmp_0222 type.[1]string  go.string."POST"þ""".statictmp_0226 type.[1]string  go.string."POST"þ""".statictmp_0230 type.[1]string  go.string."GET"þ""".statictmp_0234 type.[1]string  go.string."GET"þ""".statictmp_0238 type.[1]string  go.string."POST"þ""".statictmp_0242 type.[1]string  go.string."GET"þ""".statictmp_0246 type.[1]string  go.string."GET"þ""".statictmp_0250 type.[1]string  go.string."GET"þ""".statictmp_0254 type.[1]string  go.string."POST"þ""".statictmp_0435htype.github.com/fsouza/go-dockerclient.APIContainersþ""".statictmp_0534@jtype.[1]github.com/fsouza/go-dockerclient.PortBinding  &go.string."0.0.0.0"þ""".statictmp_0542`type.github.com/fsouza/go-dockerclient.Containeràþ""".statictmp_0647€type.[8]string€ go.string."UID"  go.string."PID"@ go.string."PPID"` go.string."C"€ "go.string."STIME"  go.string."TTY"À go.string."TIME"à go.string."CMD"þ""".statictmp_0652€type.[8]stringà go.string."root"  
go.string."7535"@ go.string."7516"` go.string."0"€ "go.string."03:20"  go.string."?"À (go.string."00:00:00"þ""".statictmp_0817 Xtype.github.com/fsouza/go-dockerclient.Imageþ,"".initdone·type.uint8þ"".NewServer·f"".NewServerþnet.Listen·fnet.Listenþ(runtime.newobject·f"runtime.newobjectþ$runtime.makemap·fruntime.makemapþ4runtime.writebarrierfat·f.runtime.writebarrierfatþ@"".(*DockerServer).buildMuxer·f:"".(*DockerServer).buildMuxerþ&runtime.typ2Itab·f runtime.typ2Itabþ"net/http.Serve·fnet/http.Serveþ$runtime.newproc·fruntime.newprocþ,runtime.throwreturn·f&runtime.throwreturnþ8"".(*DockerServer).notify·f2"".(*DockerServer).notifyþ(runtime.chansend1·f"runtime.chansend1þ4runtime.writebarrierptr·f.runtime.writebarrierptrþ¦github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Path·f github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).Pathþªgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).Methods·f¤github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).MethodsþX"".*DockerServer.("".commitContainer)·fm·fR"".*DockerServer.("".commitContainer)·fmþH"".(*DockerServer).handlerWrapper·fB"".(*DockerServer).handlerWrapperþ²github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFunc·f¬github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Route).HandlerFuncþV"".*DockerServer.("".listContainers)·fm·fP"".*DockerServer.("".listContainers)·fmþX"".*DockerServer.("".createContainer)·fm·fR"".*DockerServer.("".createContainer)·fmþZ"".*DockerServer.("".inspectContainer)·fm·fT"".*DockerServer.("".inspectContainer)·fmþX"".*DockerServer.("".renameContainer)·fm·fR"".*DockerServer.("".renameContainer)·fmþR"".*DockerServer.("".topContainer)·fm·fL"".*DockerServer.("".topContainer)·fmþV"".*DockerServer.("".startContainer)·fm·fP"".*DockerServer.("".startContainer)·fmþT"".*DockerServer.("".stopContainer)·fm·fN"".*DockerServer.("".stopContainer)·fmþV"".*DockerServer.("".pau
seContainer)·fm·fP"".*DockerServer.("".pauseContainer)·fmþZ"".*DockerServer.("".unpauseContainer)·fm·fT"".*DockerServer.("".unpauseContainer)·fmþT"".*DockerServer.("".waitContainer)·fm·fN"".*DockerServer.("".waitContainer)·fmþX"".*DockerServer.("".attachContainer)·fm·fR"".*DockerServer.("".attachContainer)·fmþX"".*DockerServer.("".removeContainer)·fm·fR"".*DockerServer.("".removeContainer)·fmþ`"".*DockerServer.("".createExecContainer)·fm·fZ"".*DockerServer.("".createExecContainer)·fmþV"".*DockerServer.("".statsContainer)·fm·fP"".*DockerServer.("".statsContainer)·fmþ`"".*DockerServer.("".resizeExecContainer)·fm·fZ"".*DockerServer.("".resizeExecContainer)·fmþ^"".*DockerServer.("".startExecContainer)·fm·fX"".*DockerServer.("".startExecContainer)·fmþb"".*DockerServer.("".inspectExecContainer)·fm·f\"".*DockerServer.("".inspectExecContainer)·fmþL"".*DockerServer.("".pullImage)·fm·fF"".*DockerServer.("".pullImage)·fmþN"".*DockerServer.("".buildImage)·fm·fH"".*DockerServer.("".buildImage)·fmþN"".*DockerServer.("".listImages)·fm·fH"".*DockerServer.("".listImages)·fmþP"".*DockerServer.("".removeImage)·fm·fJ"".*DockerServer.("".removeImage)·fmþR"".*DockerServer.("".inspectImage)·fm·fL"".*DockerServer.("".inspectImage)·fmþL"".*DockerServer.("".pushImage)·fm·fF"".*DockerServer.("".pushImage)·fmþJ"".*DockerServer.("".tagImage)·fm·fD"".*DockerServer.("".tagImage)·fmþN"".*DockerServer.("".listEvents)·fm·fH"".*DockerServer.("".listEvents)·fmþN"".*DockerServer.("".pingDocker)·fm·fH"".*DockerServer.("".pingDocker)·fmþL"".*DockerServer.("".loadImage)·fm·fF"".*DockerServer.("".loadImage)·fmþJ"".*DockerServer.("".getImage)·fm·fD"".*DockerServer.("".getImage)·fmþR"".*DockerServer.("".listNetworks)·fm·fL"".*DockerServer.("".listNetworks)·fmþP"".*DockerServer.("".networkInfo)·fm·fJ"".*DockerServer.("".networkInfo)·fmþT"".*DockerServer.("".createNetwork)·fm·fN"".*DockerServer.("".createNetwork)·fmþ:"".(*DockerServer).SetHook·f4"".(*DockerServer).SetHookþB"".(*DockerServer).PrepareExec·f<"".(
*DockerServer).PrepareExecþ*runtime.mapassign1·f$runtime.mapassign1þD"".(*DockerServer).PrepareStats·f>"".(*DockerServer).PrepareStatsþH"".(*DockerServer).PrepareFailure·fB"".(*DockerServer).PrepareFailureþT"".(*DockerServer).PrepareMultiFailures·fN"".(*DockerServer).PrepareMultiFailuresþ(runtime.growslice·f"runtime.growsliceþ8runtime.writebarrierslice·f2runtime.writebarriersliceþD"".(*DockerServer).ResetFailure·f>"".(*DockerServer).ResetFailureþ(runtime.mapdelete·f"runtime.mapdeleteþP"".(*DockerServer).ResetMultiFailures·fJ"".(*DockerServer).ResetMultiFailuresþF"".(*DockerServer).CustomHandler·f@"".(*DockerServer).CustomHandlerþ.sync.(*RWMutex).Lock·f(sync.(*RWMutex).Lockþ2sync.(*RWMutex).Unlock·f,sync.(*RWMutex).UnlockþJ"".(*DockerServer).MutateContainer·fD"".(*DockerServer).MutateContainerþ&runtime.eqstring·f runtime.eqstringþ:runtime.writebarrierstring·f4runtime.writebarrierstringþ4"".(*DockerServer).Stop·f."".(*DockerServer).Stopþ2"".(*DockerServer).URL·f,"".(*DockerServer).URLþ0runtime.concatstring3·f*runtime.concatstring3þ>"".(*DockerServer).ServeHTTP·f8"".(*DockerServer).ServeHTTPþ0sync.(*RWMutex).RLock·f*sync.(*RWMutex).RLockþ4sync.(*RWMutex).RUnlock·f.sync.(*RWMutex).RUnlockþ(runtime.deferproc·f"runtime.deferprocþ,runtime.deferreturn·f&runtime.deferreturnþ,runtime.mapiterinit·f&runtime.mapiterinitþ,runtime.mapiternext·f&runtime.mapiternextþ*regexp.MatchString·f$regexp.MatchStringþ°github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).ServeHTTP·fªgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.(*Router).ServeHTTPþH"".(*DockerServer).DefaultHandler·fB"".(*DockerServer).DefaultHandlerþ"".func·001·f"".func·001þH"".(*DockerServer).listContainers·fB"".(*DockerServer).listContainersþ.net/url.(*URL).Query·f(net/url.(*URL).Queryþ:runtime.mapaccess2_faststr·f4runtime.mapaccess2_faststrþ*runtime.panicindex·f$runtime.panicindexþ(runtime.makeslice·f"runtime.makesliceþstrings.Join·fstrings.Joinþ$runtime.convT2E·fruntime.convT2Eþ8r
untime.writebarrieriface·f2runtime.writebarrierifaceþfmt.Sprintf·ffmt.Sprintfþhgithub.com/fsouza/go-dockerclient.(*State).String·fbgithub.com/fsouza/go-dockerclient.(*State).StringþŒgithub.com/fsouza/go-dockerclient.(*NetworkSettings).PortMappingAPI·f†github.com/fsouza/go-dockerclient.(*NetworkSettings).PortMappingAPIþ,net/http.Header.Set·f&net/http.Header.Setþ$runtime.convI2I·fruntime.convI2IþDencoding/json.(*Encoder).Encode·f>encoding/json.(*Encoder).Encodeþ@"".(*DockerServer).listImages·f:"".(*DockerServer).listImagesþ>"".(*DockerServer).findImage·f8"".(*DockerServer).findImageþF"".(*DockerServer).findImageByID·f@"".(*DockerServer).findImageByIDþJ"".(*DockerServer).createContainer·fD"".(*DockerServer).createContainerþDencoding/json.(*Decoder).Decode·f>encoding/json.(*Decoder).Decodeþ"net/http.Error·fnet/http.Errorþ>regexp.(*Regexp).MatchString·f8regexp.(*Regexp).MatchStringþ math/rand.Int·fmath/rand.Intþstrconv.Itoa·fstrconv.Itoaþ*runtime.panicslice·f$runtime.panicsliceþ@"".(*DockerServer).generateID·f:"".(*DockerServer).generateIDþtime.Now·ftime.Nowþ&crypto/rand.Read·f 
crypto/rand.ReadþJ"".(*DockerServer).renameContainer·fD"".(*DockerServer).renameContainerþ’github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Vars·fŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Varsþ:runtime.mapaccess1_faststr·f4runtime.mapaccess1_faststrþF"".(*DockerServer).findContainer·f@"".(*DockerServer).findContainerþL"".(*DockerServer).inspectContainer·fF"".(*DockerServer).inspectContainerþH"".(*DockerServer).statsContainer·fB"".(*DockerServer).statsContainerþ(strconv.ParseBool·f"strconv.ParseBoolþD"".(*DockerServer).topContainer·f>"".(*DockerServer).topContainerþfmt.Fprintf·ffmt.FprintfþH"".(*DockerServer).startContainer·fB"".(*DockerServer).startContainerþF"".(*DockerServer).stopContainer·f@"".(*DockerServer).stopContainerþH"".(*DockerServer).pauseContainer·fB"".(*DockerServer).pauseContainerþL"".(*DockerServer).unpauseContainer·fF"".(*DockerServer).unpauseContainerþJ"".(*DockerServer).attachContainer·fD"".(*DockerServer).attachContainerþ*runtime.assertI2I2·f$runtime.assertI2I2þfmt.Fprintln·ffmt.FprintlnþF"".(*DockerServer).waitContainer·f@"".(*DockerServer).waitContainerþtime.Sleep·ftime.SleepþJ"".(*DockerServer).removeContainer·fD"".(*DockerServer).removeContainerþJ"".(*DockerServer).commitContainer·fD"".(*DockerServer).commitContainerþ8runtime.stringtoslicebyte·f2runtime.stringtoslicebyteþ4encoding/json.Unmarshal·f.encoding/json.Unmarshalþ0runtime.concatstring2·f*runtime.concatstring2þ@"".(*DockerServer).buildImage·f:"".(*DockerServer).buildImageþ,net/http.Header.Get·f&net/http.Header.Getþ:archive/tar.(*Reader).Next·f4archive/tar.(*Reader).Nextþ>"".(*DockerServer).pullImage·f8"".(*DockerServer).pullImageþ>"".(*DockerServer).pushImage·f8"".(*DockerServer).pushImageþ<"".(*DockerServer).tagImage·f6"".(*DockerServer).tagImageþB"".(*DockerServer).removeImage·f<"".(*DockerServer).removeImageþD"".(*DockerServer).inspectImage·f>"".(*DockerServer).inspectImageþ@"".(*DockerServer).listEvents·f:"".(*DockerServer).listEventsþ"math
/rand.Intn·fmath/rand.IntnþF"".(*DockerServer).generateEvent·f@"".(*DockerServer).generateEventþ0encoding/json.Marshal·f*encoding/json.Marshalþ@"".(*DockerServer).pingDocker·f:"".(*DockerServer).pingDockerþ>"".(*DockerServer).loadImage·f8"".(*DockerServer).loadImageþ<"".(*DockerServer).getImage·f6"".(*DockerServer).getImageþR"".(*DockerServer).createExecContainer·fL"".(*DockerServer).createExecContainerþP"".(*DockerServer).startExecContainer·fJ"".(*DockerServer).startExecContainerþ:"".(*DockerServer).getExec·f4"".(*DockerServer).getExecþR"".(*DockerServer).resizeExecContainer·fL"".(*DockerServer).resizeExecContainerþT"".(*DockerServer).inspectExecContainer·fN"".(*DockerServer).inspectExecContainerþB"".(*DockerServer).findNetwork·f<"".(*DockerServer).findNetworkþD"".(*DockerServer).listNetworks·f>"".(*DockerServer).listNetworksþB"".(*DockerServer).networkInfo·f<"".(*DockerServer).networkInfoþ""".isValidName·f"".isValidNameþ&strings.Contains·f strings.ContainsþF"".(*DockerServer).createNetwork·f@"".(*DockerServer).createNetworkþ$runtime.memmove·fruntime.memmoveþ"".init·f"".initþ(runtime.throwinit·f"runtime.throwinitþ’github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.init·fŒgithub.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.initþ®github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.init·f¨github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy.initþRgithub.com/fsouza/go-dockerclient.init·fLgithub.com/fsouza/go-dockerclient.initþtime.init·ftime.initþsync.init·fsync.initþstrings.init·fstrings.initþstrconv.init·fstrconv.initþregexp.init·fregexp.initþ net/http.init·fnet/http.initþnet.init·fnet.initþ"math/rand.init·fmath/rand.initþfmt.init·ffmt.initþ*encoding/json.init·f$encoding/json.initþ&crypto/rand.init·f crypto/rand.initþ&archive/tar.init·f archive/tar.initþ*regexp.MustCompile·f$regexp.MustCompileþbruntime.gcbits.0x48844400000000000000000000000000 H„Dþ(go.string."[]string"@2[]string 
(go.string."[]string"þtype.[]string  Ó¨ó +   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P(go.string."[]string"p,go.weak.type.*[]string€"runtime.zerovaluetype.stringþ:go.typelink.[]string/[]stringtype.[]stringþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[8]string(type..hash.[8]stringþ,type..eqfunc.[8]string$type..eq.[8]stringþ&type..alg.[8]string 0type..hashfunc.[8]string,type..eqfunc.[8]stringþbruntime.gcbits.0x48484848484848480000000000000000 HHHHHHHHþ*go.string."[8]string"@4 [8]string *go.string."[8]string"þtype.[8]stringÀÀ€USŒ> &type..alg.[8]string0bruntime.gcbits.0x48484848484848480000000000000000P*go.string."[8]string"p.go.weak.type.*[8]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[8]string/[8]stringtype.[8]stringþbruntime.gcbits.0x88000000000000000000000000000000 ˆþJgo.string."*map.bucket[string]string"`T*map.bucket[string]string Jgo.string."*map.bucket[string]string"þYˆ à runtime.algarray0Btype..gc.map.bucket[string]string@Jtype..gcprog.map.bucket[string]stringPHgo.string."map.bucket[string]string"pLgo.weak.type.*map.bucket[string]string€"runtime.zerovalueÀ:type.map.bucket[string]stringÀ go.string."keys"àtype.[8]string$go.string."values"°type.[8]stringà(go.string."overflow"€go.weak.type.*map[string]string€"runtime.zerovaluetype.string type.string°:type.map.bucket[string]stringÀ4type.map.hdr[string]stringþ^go.typelink.map[string]string/map[string]string,type.map[string]stringþ$go.string."func()"0.func() $go.string."func()"þtype.func()€€ö¼‚ö3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P$go.string."func()"p(go.weak.type.*func()€"runtime.zerovalue €type.func()Ѐtype.func()þ(go.string."[]func()"@2[]func() (go.string."[]func()"þtype.[]func()  =Ä%   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P(go.string."[]func()"p,go.weak.type.*[]func()€"runtime.zerovaluetype.func()þ:go.typelink.[]func()/[]func()type.[]func()þbruntime.gcbits.0x88888888000000000000000000000000 ˆˆˆˆþ*go.string."[8]func()"@4 [8]func() *go.string."[8]func()"þtype.[8]func()ÀÀ@Êã×o à runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000P*go.string."[8]func()"p.go.weak.type.*[8]func()€"runtime.zerovaluetype.func() type.[]func()þ>go.typelink.[8]func()/[8]func()type.[8]func()þJgo.string."*map.bucket[string]func()"`T*map.bucket[string]func() Jgo.string."*map.bucket[string]func()"þgo.weak.type.*map[string]func()€"runtime.zerovaluetype.string type.func()°:type.map.bucket[string]func()À4type.map.hdr[string]func()þ^go.typelink.map[string]func()/map[string]func(),type.map[string]func()þJgo.string."func(string) docker.Stats"`Tfunc(string) docker.Stats Jgo.string."func(string) docker.Stats"þrtype.func(string) github.com/fsouza/go-dockerclient.Stats  ¦ÿ,3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."func(string) docker.Stats"p„go.weak.type.*func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovalue €rtype.func(string) github.com/fsouza/go-dockerclient.StatsÐrtype.func(string) github.com/fsouza/go-dockerclient.Stats€type.stringXtype.github.com/fsouza/go-dockerclient.StatsþNgo.string."[]func(string) docker.Stats"`X[]func(string) docker.Stats Ngo.string."[]func(string) docker.Stats"þvtype.[]func(string) github.com/fsouza/go-dockerclient.Stats  øô®   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PNgo.string."[]func(string) docker.Stats"pˆgo.weak.type.*[]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovaluertype.func(string) github.com/fsouza/go-dockerclient.Statsþ¼go.typelink.[]func(string) docker.Stats/[]func(string) github.com/fsouza/go-dockerclient.Statsvtype.[]func(string) 
github.com/fsouza/go-dockerclient.StatsþPgo.string."[8]func(string) docker.Stats"`Z[8]func(string) docker.Stats Pgo.string."[8]func(string) docker.Stats"þxtype.[8]func(string) github.com/fsouza/go-dockerclient.StatsÀÀ@Æü à runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000PPgo.string."[8]func(string) docker.Stats"pŠgo.weak.type.*[8]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovaluertype.func(string) github.com/fsouza/go-dockerclient.Stats vtype.[]func(string) github.com/fsouza/go-dockerclient.StatsþÀgo.typelink.[8]func(string) docker.Stats/[8]func(string) github.com/fsouza/go-dockerclient.Statsxtype.[8]func(string) github.com/fsouza/go-dockerclient.Statsþpgo.string."*map.bucket[string]func(string) docker.Stats"€z,*map.bucket[string]func(string) docker.Stats pgo.string."*map.bucket[string]func(string) docker.Stats"þ˜type.*map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Stats  t}8±6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."*map.bucket[string]func(string) docker.Stats"pªgo.weak.type.**map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovalue–type.map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Statsþngo.string."map.bucket[string]func(string) docker.Stats"€x+map.bucket[string]func(string) docker.Stats ngo.string."map.bucket[string]func(string) docker.Stats"þ–type.map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Stats°°оçü5ˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000Pngo.string."map.bucket[string]func(string) docker.Stats"p¨go.weak.type.*map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovalueÀ–type.map.bucket[string]func(string) github.com/fsouza/go-dockerclient.StatsÀ go.string."keys"àtype.[8]string$go.string."values"°xtype.[8]func(string) github.com/fsouza/go-dockerclient.Statsà(go.string."overflow"€˜type.*map.bucket[string]func(string) 
github.com/fsouza/go-dockerclient.Statsþhgo.string."map.hdr[string]func(string) docker.Stats"€r(map.hdr[string]func(string) docker.Stats hgo.string."map.hdr[string]func(string) docker.Stats"þtype.map.hdr[string]func(string) github.com/fsouza/go-dockerclient.Statsàà0ÇŸ  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000Phgo.string."map.hdr[string]func(string) docker.Stats"p¢go.weak.type.*map.hdr[string]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovalueÀtype.map.hdr[string]func(string) github.com/fsouza/go-dockerclient.StatsÀ&go.string."buckets"à˜type.*map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Stats,go.string."oldbuckets"°˜type.*map.bucket[string]func(string) github.com/fsouza/go-dockerclient.Statsþ`go.string."map[string]func(string) docker.Stats"pj$map[string]func(string) docker.Stats `go.string."map[string]func(string) docker.Stats"þˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.StatsÜÜz¬ 5Ð € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."map[string]func(string) docker.Stats"pšgo.weak.type.*map[string]func(string) github.com/fsouza/go-dockerclient.Stats€"runtime.zerovaluetype.string rtype.func(string) github.com/fsouza/go-dockerclient.Stats°–type.map.bucket[string]func(string) github.com/fsouza/go-dockerclient.StatsÀtype.map.hdr[string]func(string) github.com/fsouza/go-dockerclient.Statsþàgo.typelink.map[string]func(string) docker.Stats/map[string]func(string) github.com/fsouza/go-dockerclient.Statsˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.Statsþ4go.string."[]http.Handler"@>[]http.Handler 4go.string."[]http.Handler"þ.type.[]net/http.Handler  c´á¾   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]http.Handler"p@go.weak.type.*[]net/http.Handler€"runtime.zerovalue*type.net/http.HandlerþZgo.typelink.[]http.Handler/[]net/http.Handler.type.[]net/http.HandlerþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þDtype..hashfunc.[8]net/http.Handlergo.string."[]*docker.Container"PH[]*docker.Container >go.string."[]*docker.Container"þftype.[]*github.com/fsouza/go-dockerclient.Container  ¼‚½ù   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P>go.string."[]*docker.Container"pxgo.weak.type.*[]*github.com/fsouza/go-dockerclient.Container€"runtime.zerovaluebtype.*github.com/fsouza/go-dockerclient.Containerþœgo.typelink.[]*docker.Container/[]*github.com/fsouza/go-dockerclient.Containerftype.[]*github.com/fsouza/go-dockerclient.ContainerþBgo.string."[]*docker.ExecInspect"PL[]*docker.ExecInspect Bgo.string."[]*docker.ExecInspect"þjtype.[]*github.com/fsouza/go-dockerclient.ExecInspect  YË&6   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PBgo.string."[]*docker.ExecInspect"p|go.weak.type.*[]*github.com/fsouza/go-dockerclient.ExecInspect€"runtime.zerovalueftype.*github.com/fsouza/go-dockerclient.ExecInspectþ¤go.typelink.[]*docker.ExecInspect/[]*github.com/fsouza/go-dockerclient.ExecInspectjtype.[]*github.com/fsouza/go-dockerclient.ExecInspectþ4go.string."[]docker.Image"@>[]docker.Image 4go.string."[]docker.Image"þ\type.[]github.com/fsouza/go-dockerclient.Image  ©ÙËr   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]docker.Image"pngo.weak.type.*[]github.com/fsouza/go-dockerclient.Image€"runtime.zerovalueXtype.github.com/fsouza/go-dockerclient.Imageþˆgo.typelink.[]docker.Image/[]github.com/fsouza/go-dockerclient.Image\type.[]github.com/fsouza/go-dockerclient.Imageþ:go.string."[]*docker.Network"PD[]*docker.Network :go.string."[]*docker.Network"þbtype.[]*github.com/fsouza/go-dockerclient.Network  à\,i   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P:go.string."[]*docker.Network"ptgo.weak.type.*[]*github.com/fsouza/go-dockerclient.Network€"runtime.zerovalue^type.*github.com/fsouza/go-dockerclient.Networkþ”go.typelink.[]*docker.Network/[]*github.com/fsouza/go-dockerclient.Networkbtype.[]*github.com/fsouza/go-dockerclient.Networkþ>go.string."func(*http.Request)"PHfunc(*http.Request) >go.string."func(*http.Request)"þ8type.func(*net/http.Request)$I”3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func(*http.Request)"pJgo.weak.type.*func(*net/http.Request)€"runtime.zerovalue €8type.func(*net/http.Request)Ð8type.func(*net/http.Request)€,type.*net/http.Requestþ>go.string."[]map[string]string"PH[]map[string]string >go.string."[]map[string]string"þ0type.[]map[string]string  è÷Õf   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P>go.string."[]map[string]string"pBgo.weak.type.*[]map[string]string€"runtime.zerovalue,type.map[string]stringþfgo.typelink.[]map[string]string/[]map[string]string0type.[]map[string]stringþHgo.string."chan<- *docker.Container"`Rchan<- *docker.Container Hgo.string."chan<- *docker.Container"þptype.chan<- *github.com/fsouza/go-dockerclient.Container°°ìw—ô2   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."chan<- *docker.Container"p‚go.weak.type.*chan<- 
*github.com/fsouza/go-dockerclient.Container€"runtime.zerovaluebtype.*github.com/fsouza/go-dockerclient.Containerþ°go.typelink.chan<- *docker.Container/chan<- *github.com/fsouza/go-dockerclient.Containerptype.chan<- *github.com/fsouza/go-dockerclient.ContainerþBgo.string."*testing.DockerServer"PL*testing.DockerServer Bgo.string."*testing.DockerServer"þzgo.string."func(*testing.DockerServer, string, http.Handler)"„1func(*testing.DockerServer, string, http.Handler) zgo.string."func(*testing.DockerServer, string, http.Handler)"þjtype.func(*"".DockerServer, string, net/http.Handler)°°º30¶3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pzgo.string."func(*testing.DockerServer, string, http.Handler)"p|go.weak.type.*func(*"".DockerServer, string, net/http.Handler)€"runtime.zerovalue €jtype.func(*"".DockerServer, string, net/http.Handler)аjtype.func(*"".DockerServer, string, net/http.Handler)€*type.*"".DockerServertype.string *type.net/http.Handlerþhgo.string."func(*testing.DockerServer) http.Handler"€r(func(*testing.DockerServer) http.Handler hgo.string."func(*testing.DockerServer) http.Handler"þXtype.func(*"".DockerServer) net/http.Handler  ¦ªª3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(*testing.DockerServer) http.Handler"pjgo.weak.type.*func(*"".DockerServer) net/http.Handler€"runtime.zerovalue €Xtype.func(*"".DockerServer) net/http.HandlerÐXtype.func(*"".DockerServer) net/http.Handler€*type.*"".DockerServer*type.net/http.Handlerþ†go.string."func(*testing.DockerServer, string, docker.State) error"7func(*testing.DockerServer, string, docker.State) error †go.string."func(*testing.DockerServer, string, docker.State) error"þ¤type.func(*"".DockerServer, string, github.com/fsouza/go-dockerclient.State) errorÀÀM3PÏ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P†go.string."func(*testing.DockerServer, string, docker.State) error"p¶go.weak.type.*func(*"".DockerServer, string, 
github.com/fsouza/go-dockerclient.State) error€"runtime.zerovalue €¤type.func(*"".DockerServer, string, github.com/fsouza/go-dockerclient.State) errorа¤type.func(*"".DockerServer, string, github.com/fsouza/go-dockerclient.State) error€*type.*"".DockerServertype.string Xtype.github.com/fsouza/go-dockerclient.State°type.errorþngo.string."func(*testing.DockerServer, string, func())"€x+func(*testing.DockerServer, string, func()) ngo.string."func(*testing.DockerServer, string, func())"þVtype.func(*"".DockerServer, string, func())°°ðÔ±o3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(*testing.DockerServer, string, func())"phgo.weak.type.*func(*"".DockerServer, string, func())€"runtime.zerovalue €Vtype.func(*"".DockerServer, string, func())аVtype.func(*"".DockerServer, string, func())€*type.*"".DockerServertype.string type.func()þngo.string."func(*testing.DockerServer, string, string)"€x+func(*testing.DockerServer, string, string) ngo.string."func(*testing.DockerServer, string, string)"þVtype.func(*"".DockerServer, string, string)°°-qXÕ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pngo.string."func(*testing.DockerServer, string, string)"phgo.weak.type.*func(*"".DockerServer, string, string)€"runtime.zerovalue €Vtype.func(*"".DockerServer, string, string)аVtype.func(*"".DockerServer, string, string)€*type.*"".DockerServertype.string type.stringþ”go.string."func(*testing.DockerServer, string, func(string) docker.Stats)" ž>func(*testing.DockerServer, string, func(string) docker.Stats) ”go.string."func(*testing.DockerServer, string, func(string) docker.Stats)"þ²type.func(*"".DockerServer, string, func(string) github.com/fsouza/go-dockerclient.Stats)°°uBÁ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P”go.string."func(*testing.DockerServer, string, func(string) docker.Stats)"pÄgo.weak.type.*func(*"".DockerServer, string, func(string) 
github.com/fsouza/go-dockerclient.Stats)€"runtime.zerovalue €²type.func(*"".DockerServer, string, func(string) github.com/fsouza/go-dockerclient.Stats)а²type.func(*"".DockerServer, string, func(string) github.com/fsouza/go-dockerclient.Stats)€*type.*"".DockerServertype.string rtype.func(string) github.com/fsouza/go-dockerclient.Statsþ^go.string."func(*testing.DockerServer, string)"ph#func(*testing.DockerServer, string) ^go.string."func(*testing.DockerServer, string)"þFtype.func(*"".DockerServer, string)  :i2…3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P^go.string."func(*testing.DockerServer, string)"pXgo.weak.type.*func(*"".DockerServer, string)€"runtime.zerovalue €Ftype.func(*"".DockerServer, string)РFtype.func(*"".DockerServer, string)€*type.*"".DockerServertype.stringþNgo.string."func(*testing.DockerServer)"`Xfunc(*testing.DockerServer) Ngo.string."func(*testing.DockerServer)"þ6type.func(*"".DockerServer)þv‡@3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PNgo.string."func(*testing.DockerServer)"pHgo.weak.type.*func(*"".DockerServer)€"runtime.zerovalue €6type.func(*"".DockerServer)Ð6type.func(*"".DockerServer)€*type.*"".DockerServerþ–go.string."func(*testing.DockerServer, http.ResponseWriter, *http.Request)"  ?func(*testing.DockerServer, http.ResponseWriter, *http.Request) –go.string."func(*testing.DockerServer, http.ResponseWriter, *http.Request)"þŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)°°ü]° +3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P–go.string."func(*testing.DockerServer, http.ResponseWriter, *http.Request)"p go.weak.type.*func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)€"runtime.zerovalue €Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)аŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)€*type.*"".DockerServer8type.net/http.ResponseWriter 
,type.*net/http.Requestþxgo.string."func(*testing.DockerServer, func(*http.Request))"‚0func(*testing.DockerServer, func(*http.Request)) xgo.string."func(*testing.DockerServer, func(*http.Request))"þhtype.func(*"".DockerServer, func(*net/http.Request))  Iã*æ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pxgo.string."func(*testing.DockerServer, func(*http.Request))"pzgo.weak.type.*func(*"".DockerServer, func(*net/http.Request))€"runtime.zerovalue €htype.func(*"".DockerServer, func(*net/http.Request))Рhtype.func(*"".DockerServer, func(*net/http.Request))€*type.*"".DockerServer8type.func(*net/http.Request)þ\go.string."func(*testing.DockerServer) string"pf"func(*testing.DockerServer) string \go.string."func(*testing.DockerServer) string"þDtype.func(*"".DockerServer) string  FúÕp3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P\go.string."func(*testing.DockerServer) string"pVgo.weak.type.*func(*"".DockerServer) string€"runtime.zerovalue €Dtype.func(*"".DockerServer) stringÐDtype.func(*"".DockerServer) string€*type.*"".DockerServertype.stringþžgo.string."func(*testing.DockerServer, string) (*docker.Container, int, error)"°¨Cfunc(*testing.DockerServer, string) (*docker.Container, int, error) žgo.string."func(*testing.DockerServer, string) (*docker.Container, int, error)"þ¼type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Container, int, error)ÐФr÷$3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pžgo.string."func(*testing.DockerServer, string) (*docker.Container, int, error)"pÎgo.weak.type.*func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Container, int, error)€"runtime.zerovalue €¼type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Container, int, error)Р¼type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Container, int, error)€*type.*"".DockerServertype.string 
btype.*github.com/fsouza/go-dockerclient.Container°type.intÀtype.errorþ~go.string."func(*testing.DockerServer, string) (string, error)"ˆ3func(*testing.DockerServer, string) (string, error) ~go.string."func(*testing.DockerServer, string) (string, error)"þftype.func(*"".DockerServer, string) (string, error)ÀÀÍóti3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P~go.string."func(*testing.DockerServer, string) (string, error)"pxgo.weak.type.*func(*"".DockerServer, string) (string, error)€"runtime.zerovalue €ftype.func(*"".DockerServer, string) (string, error)Рftype.func(*"".DockerServer, string) (string, error)€*type.*"".DockerServertype.string type.string°type.errorþˆgo.string."func(*testing.DockerServer, string) (string, int, error)" ’8func(*testing.DockerServer, string) (string, int, error) ˆgo.string."func(*testing.DockerServer, string) (string, int, error)"þptype.func(*"".DockerServer, string) (string, int, error)ÐÐC ØO3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pˆgo.string."func(*testing.DockerServer, string) (string, int, error)"p‚go.weak.type.*func(*"".DockerServer, string) (string, int, error)€"runtime.zerovalue €ptype.func(*"".DockerServer, string) (string, int, error)Рptype.func(*"".DockerServer, string) (string, int, error)€*type.*"".DockerServertype.string type.string°type.intÀtype.errorþšgo.string."func(*testing.DockerServer, string) (*docker.Network, int, error)"°¤Afunc(*testing.DockerServer, string) (*docker.Network, int, error) šgo.string."func(*testing.DockerServer, string) (*docker.Network, int, error)"þ¸type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Network, int, error)ÐЛÄèŸ3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pšgo.string."func(*testing.DockerServer, string) (*docker.Network, int, error)"pÊgo.weak.type.*func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Network, int, error)€"runtime.zerovalue €¸type.func(*"".DockerServer, 
string) (*github.com/fsouza/go-dockerclient.Network, int, error)Р¸type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Network, int, error)€*type.*"".DockerServertype.string ^type.*github.com/fsouza/go-dockerclient.Network°type.intÀtype.errorþrgo.string."func(*testing.DockerServer) *docker.APIEvents"€|-func(*testing.DockerServer) *docker.APIEvents rgo.string."func(*testing.DockerServer) *docker.APIEvents"þtype.func(*"".DockerServer) *github.com/fsouza/go-dockerclient.APIEvents  vÿPv3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Prgo.string."func(*testing.DockerServer) *docker.APIEvents"p¢go.weak.type.*func(*"".DockerServer) *github.com/fsouza/go-dockerclient.APIEvents€"runtime.zerovalue €type.func(*"".DockerServer) *github.com/fsouza/go-dockerclient.APIEventsÐtype.func(*"".DockerServer) *github.com/fsouza/go-dockerclient.APIEvents€*type.*"".DockerServerbtype.*github.com/fsouza/go-dockerclient.APIEventsþ˜go.string."func(*testing.DockerServer, string) (*docker.ExecInspect, error)"°¢@func(*testing.DockerServer, string) (*docker.ExecInspect, error) ˜go.string."func(*testing.DockerServer, string) (*docker.ExecInspect, error)"þ¶type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)ÀÀ,— =3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P˜go.string."func(*testing.DockerServer, string) (*docker.ExecInspect, error)"pÈgo.weak.type.*func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)€"runtime.zerovalue €¶type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)Р¶type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)€*type.*"".DockerServertype.string ftype.*github.com/fsouza/go-dockerclient.ExecInspect°type.errorþhgo.string."func(http.ResponseWriter, *http.Request)"€r(func(http.ResponseWriter, *http.Request) hgo.string."func(http.ResponseWriter, 
*http.Request)"þjtype.func(net/http.ResponseWriter, *net/http.Request)  ‘ô›3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Phgo.string."func(http.ResponseWriter, *http.Request)"p|go.weak.type.*func(net/http.ResponseWriter, *net/http.Request)€"runtime.zerovalue €jtype.func(net/http.ResponseWriter, *net/http.Request)Рjtype.func(net/http.ResponseWriter, *net/http.Request)€8type.net/http.ResponseWriter,type.*net/http.Requestþ""..gostring.1€þnfunc(*testing.DockerServer, func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) ""..gostring.1þütype.func(*"".DockerServer, func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)°°0|)3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P""..gostring.1pŽgo.weak.type.*func(*"".DockerServer, func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)€"runtime.zerovalue €ütype.func(*"".DockerServer, func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)Рütype.func(*"".DockerServer, func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)€*type.*"".DockerServerjtype.func(net/http.ResponseWriter, *net/http.Request) jtype.func(net/http.ResponseWriter, *net/http.Request)þtgo.string."func(*testing.DockerServer, *docker.Container)"€~.func(*testing.DockerServer, *docker.Container) tgo.string."func(*testing.DockerServer, *docker.Container)"þ’type.func(*"".DockerServer, *github.com/fsouza/go-dockerclient.Container)  ËƸs3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."func(*testing.DockerServer, *docker.Container)"p¤go.weak.type.*func(*"".DockerServer, *github.com/fsouza/go-dockerclient.Container)€"runtime.zerovalue €’type.func(*"".DockerServer, *github.com/fsouza/go-dockerclient.Container)Р’type.func(*"".DockerServer, 
*github.com/fsouza/go-dockerclient.Container)€*type.*"".DockerServerbtype.*github.com/fsouza/go-dockerclient.Containerþ2go.string."CustomHandler"@< CustomHandler 2go.string."CustomHandler"þLgo.string."func(string, http.Handler)"`Vfunc(string, http.Handler) Lgo.string."func(string, http.Handler)"þFtype.func(string, net/http.Handler)  )m¶ 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PLgo.string."func(string, http.Handler)"pXgo.weak.type.*func(string, net/http.Handler)€"runtime.zerovalue €Ftype.func(string, net/http.Handler)РFtype.func(string, net/http.Handler)€type.string*type.net/http.Handlerþ4go.string."DefaultHandler"@>DefaultHandler 4go.string."DefaultHandler"þ>go.string."func() http.Handler"PHfunc() http.Handler >go.string."func() http.Handler"þ8type.func() net/http.Handler¥ë53 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P>go.string."func() http.Handler"pJgo.weak.type.*func() net/http.Handler€"runtime.zerovalue €8type.func() net/http.HandlerЀ8type.func() net/http.Handler€*type.net/http.Handlerþ6go.string."MutateContainer"@@MutateContainer 6go.string."MutateContainer"þXgo.string."func(string, docker.State) error"pb func(string, docker.State) error Xgo.string."func(string, docker.State) error"þ€type.func(string, github.com/fsouza/go-dockerclient.State) error°°.Õ 3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PXgo.string."func(string, docker.State) error"p’go.weak.type.*func(string, github.com/fsouza/go-dockerclient.State) error€"runtime.zerovalue €€type.func(string, github.com/fsouza/go-dockerclient.State) errorР€type.func(string, github.com/fsouza/go-dockerclient.State) error€type.stringXtype.github.com/fsouza/go-dockerclient.State type.errorþ.go.string."PrepareExec"@8 PrepareExec .go.string."PrepareExec"þ@go.string."func(string, func())"PJfunc(string, func()) @go.string."func(string, func())"þ2type.func(string, func())  Â+>Ž3 € 
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."func(string, func())"pDgo.weak.type.*func(string, func())€"runtime.zerovalue €2type.func(string, func())Р2type.func(string, func())€type.stringtype.func()þ4go.string."PrepareFailure"@>PrepareFailure 4go.string."PrepareFailure"þ@go.string."func(string, string)"PJfunc(string, string) @go.string."func(string, string)"þ2type.func(string, string)  õ!™é3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."func(string, string)"pDgo.weak.type.*func(string, string)€"runtime.zerovalue €2type.func(string, string)Р2type.func(string, string)€type.stringtype.stringþ@go.string."PrepareMultiFailures"PJPrepareMultiFailures @go.string."PrepareMultiFailures"þ0go.string."PrepareStats"@: PrepareStats 0go.string."PrepareStats"þfgo.string."func(string, func(string) docker.Stats)"pp'func(string, func(string) docker.Stats) fgo.string."func(string, func(string) docker.Stats)"þŽtype.func(string, func(string) github.com/fsouza/go-dockerclient.Stats)  ;ô=3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pfgo.string."func(string, func(string) docker.Stats)"p go.weak.type.*func(string, func(string) github.com/fsouza/go-dockerclient.Stats)€"runtime.zerovalue €Žtype.func(string, func(string) github.com/fsouza/go-dockerclient.Stats)РŽtype.func(string, func(string) github.com/fsouza/go-dockerclient.Stats)€type.stringrtype.func(string) github.com/fsouza/go-dockerclient.Statsþ0go.string."ResetFailure"@: ResetFailure 0go.string."ResetFailure"þ0go.string."func(string)"@: func(string) 0go.string."func(string)"þ"type.func(string)ŠÇ¹¾3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."func(string)"p4go.weak.type.*func(string)€"runtime.zerovalue €"type.func(string)Ð"type.func(string)€type.stringþgo.string."createExecContainer"PHcreateExecContainer >go.string."createExecContainer"þ2go.string."createNetwork"@< createNetwork 
2go.string."createNetwork"þ2go.string."findContainer"@< findContainer 2go.string."findContainer"þpgo.string."func(string) (*docker.Container, int, error)"€z,func(string) (*docker.Container, int, error) pgo.string."func(string) (*docker.Container, int, error)"þ˜type.func(string) (*github.com/fsouza/go-dockerclient.Container, int, error)ÀÀ4‡Õ`3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."func(string) (*docker.Container, int, error)"pªgo.weak.type.*func(string) (*github.com/fsouza/go-dockerclient.Container, int, error)€"runtime.zerovalue €˜type.func(string) (*github.com/fsouza/go-dockerclient.Container, int, error)Иtype.func(string) (*github.com/fsouza/go-dockerclient.Container, int, error)€type.stringbtype.*github.com/fsouza/go-dockerclient.Container type.int°type.errorþ*go.string."findImage"@4 findImage *go.string."findImage"þPgo.string."func(string) (string, error)"`Zfunc(string) (string, error) Pgo.string."func(string) (string, error)"þBtype.func(string) (string, error)°°D +j+3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PPgo.string."func(string) (string, error)"pTgo.weak.type.*func(string) (string, error)€"runtime.zerovalue €Btype.func(string) (string, error)ÐBtype.func(string) (string, error)€type.stringtype.string type.errorþ2go.string."findImageByID"@< findImageByID 2go.string."findImageByID"þZgo.string."func(string) (string, int, error)"pd!func(string) (string, int, error) Zgo.string."func(string) (string, int, error)"þLtype.func(string) (string, int, error)ÀÀ»¶(>3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."func(string) (string, int, error)"p^go.weak.type.*func(string) (string, int, error)€"runtime.zerovalue €Ltype.func(string) (string, int, error)ÐLtype.func(string) (string, int, error)€type.stringtype.string type.int°type.errorþ.go.string."findNetwork"@8 findNetwork .go.string."findNetwork"þlgo.string."func(string) (*docker.Network, int, 
error)"€v*func(string) (*docker.Network, int, error) lgo.string."func(string) (*docker.Network, int, error)"þ”type.func(string) (*github.com/fsouza/go-dockerclient.Network, int, error)ÀÀ%Êò3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Plgo.string."func(string) (*docker.Network, int, error)"p¦go.weak.type.*func(string) (*github.com/fsouza/go-dockerclient.Network, int, error)€"runtime.zerovalue €”type.func(string) (*github.com/fsouza/go-dockerclient.Network, int, error)Дtype.func(string) (*github.com/fsouza/go-dockerclient.Network, int, error)€type.string^type.*github.com/fsouza/go-dockerclient.Network type.int°type.errorþ2go.string."generateEvent"@< generateEvent 2go.string."generateEvent"þHgo.string."func() *docker.APIEvents"`Rfunc() *docker.APIEvents Hgo.string."func() *docker.APIEvents"þptype.func() *github.com/fsouza/go-dockerclient.APIEventsŒá‡Ô3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PHgo.string."func() *docker.APIEvents"p‚go.weak.type.*func() *github.com/fsouza/go-dockerclient.APIEvents€"runtime.zerovalue €ptype.func() *github.com/fsouza/go-dockerclient.APIEventsЀptype.func() *github.com/fsouza/go-dockerclient.APIEvents€btype.*github.com/fsouza/go-dockerclient.APIEventsþ,go.string."generateID"@6 +generateID ,go.string."generateID"þ&go.string."getExec"00getExec &go.string."getExec"þjgo.string."func(string) (*docker.ExecInspect, error)"€t)func(string) (*docker.ExecInspect, error) jgo.string."func(string) (*docker.ExecInspect, error)"þ’type.func(string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)°° ‰ï3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."func(string) (*docker.ExecInspect, error)"p¤go.weak.type.*func(string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)€"runtime.zerovalue €’type.func(string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)Ð’type.func(string) (*github.com/fsouza/go-dockerclient.ExecInspect, 
error)€type.stringftype.*github.com/fsouza/go-dockerclient.ExecInspect type.errorþ(go.string."getImage"@2getImage (go.string."getImage"þ4go.string."handlerWrapper"@>handlerWrapper 4go.string."handlerWrapper"þÆgo.string."func(func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request)"ÐÐWfunc(func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) Ægo.string."func(func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request)"þØtype.func(func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)  é7¤x3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PÆgo.string."func(func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request)"pêgo.weak.type.*func(func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)€"runtime.zerovalue €Øtype.func(func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)ÐØtype.func(func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)€jtype.func(net/http.ResponseWriter, *net/http.Request)jtype.func(net/http.ResponseWriter, *net/http.Request)þ8go.string."inspectContainer"PBinspectContainer 8go.string."inspectContainer"þ@go.string."inspectExecContainer"PJinspectExecContainer @go.string."inspectExecContainer"þ0go.string."inspectImage"@: inspectImage 0go.string."inspectImage"þ4go.string."listContainers"@>listContainers 4go.string."listContainers"þ,go.string."listEvents"@6 +listEvents ,go.string."listEvents"þ,go.string."listImages"@6 +listImages ,go.string."listImages"þ0go.string."listNetworks"@: listNetworks 0go.string."listNetworks"þ*go.string."loadImage"@4 loadImage *go.string."loadImage"þ.go.string."networkInfo"@8 networkInfo .go.string."networkInfo"þ$go.string."notify"0.notify $go.string."notify"þFgo.string."func(*docker.Container)"PPfunc(*docker.Container) 
Fgo.string."func(*docker.Container)"þntype.func(*github.com/fsouza/go-dockerclient.Container)¬û:v3 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PFgo.string."func(*docker.Container)"p€go.weak.type.*func(*github.com/fsouza/go-dockerclient.Container)€"runtime.zerovalue €ntype.func(*github.com/fsouza/go-dockerclient.Container)Ðntype.func(*github.com/fsouza/go-dockerclient.Container)€btype.*github.com/fsouza/go-dockerclient.Containerþ4go.string."pauseContainer"@>pauseContainer 4go.string."pauseContainer"þ,go.string."pingDocker"@6 +pingDocker ,go.string."pingDocker"þ*go.string."pullImage"@4 pullImage *go.string."pullImage"þ*go.string."pushImage"@4 pushImage *go.string."pushImage"þ6go.string."removeContainer"@@removeContainer 6go.string."removeContainer"þ.go.string."removeImage"@8 removeImage .go.string."removeImage"þ6go.string."renameContainer"@@renameContainer 6go.string."renameContainer"þ>go.string."resizeExecContainer"PHresizeExecContainer >go.string."resizeExecContainer"þ4go.string."startContainer"@>startContainer 4go.string."startContainer"þstatsContainer 4go.string."statsContainer"þ2go.string."stopContainer"@< stopContainer 2go.string."stopContainer"þ(go.string."tagImage"@2tagImage (go.string."tagImage"þ0go.string."topContainer"@: topContainer 0go.string."topContainer"þ8go.string."unpauseContainer"PBunpauseContainer 8go.string."unpauseContainer"þ2go.string."waitContainer"@< waitContainer 2go.string."waitContainer"þ*type.*"".DockerServer++ØÏPß677Š   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*testing.DockerServer"p"".(*DockerServer).PrepareStats€>"".(*DockerServer).PrepareStats0go.string."ResetFailure"°"type.func(string)ÀFtype.func(*"".DockerServer, string)Ð>"".(*DockerServer).ResetFailureà>"".(*DockerServer).ResetFailureðgo.string."createExecContainer"À"go.importpath."".Ðjtype.func(net/http.ResponseWriter, *net/http.Request)àŽtype.func(*"".DockerServer, net/http.ResponseWriter, 
*net/http.Request)ðL"".(*DockerServer).createExecContainer€L"".(*DockerServer).createExecContainer2go.string."createNetwork" "go.importpath."".°jtype.func(net/http.ResponseWriter, *net/http.Request)ÀŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)Ð@"".(*DockerServer).createNetworkà@"".(*DockerServer).createNetworkð2go.string."findContainer"€"go.importpath."".˜type.func(string) (*github.com/fsouza/go-dockerclient.Container, int, error) ¼type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Container, int, error)°@"".(*DockerServer).findContainerÀ@"".(*DockerServer).findContainerÐ*go.string."findImage"à"go.importpath."".ðBtype.func(string) (string, error)€ftype.func(*"".DockerServer, string) (string, error)8"".(*DockerServer).findImage 8"".(*DockerServer).findImage°2go.string."findImageByID"À"go.importpath."".ÐLtype.func(string) (string, int, error)àptype.func(*"".DockerServer, string) (string, int, error)ð@"".(*DockerServer).findImageByID€@"".(*DockerServer).findImageByID.go.string."findNetwork" "go.importpath."".°”type.func(string) (*github.com/fsouza/go-dockerclient.Network, int, error)À¸type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.Network, int, error)Ð<"".(*DockerServer).findNetworkà<"".(*DockerServer).findNetworkð2go.string."generateEvent"€"go.importpath."".ptype.func() *github.com/fsouza/go-dockerclient.APIEvents type.func(*"".DockerServer) *github.com/fsouza/go-dockerclient.APIEvents°@"".(*DockerServer).generateEventÀ@"".(*DockerServer).generateEventÐ,go.string."generateID"à"go.importpath."".ð$type.func() string€Dtype.func(*"".DockerServer) string:"".(*DockerServer).generateID :"".(*DockerServer).generateID°&go.string."getExec"À"go.importpath."".Ð’type.func(string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)à¶type.func(*"".DockerServer, string) (*github.com/fsouza/go-dockerclient.ExecInspect, error)ð4"".(*DockerServer).getExec€4"".(*DockerServer).getExec(go.string."getImage" 
"go.importpath."".°jtype.func(net/http.ResponseWriter, *net/http.Request)ÀŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)Ð6"".(*DockerServer).getImageà6"".(*DockerServer).getImageð4go.string."handlerWrapper"€"go.importpath."".Øtype.func(func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request) ütype.func(*"".DockerServer, func(net/http.ResponseWriter, *net/http.Request)) func(net/http.ResponseWriter, *net/http.Request)°B"".(*DockerServer).handlerWrapperÀB"".(*DockerServer).handlerWrapperÐ8go.string."inspectContainer"à"go.importpath."".ðjtype.func(net/http.ResponseWriter, *net/http.Request)€Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)F"".(*DockerServer).inspectContainer F"".(*DockerServer).inspectContainer°@go.string."inspectExecContainer"À"go.importpath."".Ðjtype.func(net/http.ResponseWriter, *net/http.Request)àŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)ðN"".(*DockerServer).inspectExecContainer€N"".(*DockerServer).inspectExecContainer0go.string."inspectImage" "go.importpath."".°jtype.func(net/http.ResponseWriter, *net/http.Request)ÀŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)Ð>"".(*DockerServer).inspectImageà>"".(*DockerServer).inspectImageð4go.string."listContainers"€"go.importpath."".jtype.func(net/http.ResponseWriter, *net/http.Request) Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)°B"".(*DockerServer).listContainersÀB"".(*DockerServer).listContainersÐ,go.string."listEvents"à"go.importpath."".ðjtype.func(net/http.ResponseWriter, *net/http.Request)€Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request):"".(*DockerServer).listEvents :"".(*DockerServer).listEvents°,go.string."listImages"À"go.importpath."".Ðjtype.func(net/http.ResponseWriter, *net/http.Request)àŽtype.func(*"".DockerServer, net/http.ResponseWriter, 
*net/http.Request)ð:"".(*DockerServer).listImages€:"".(*DockerServer).listImages0go.string."listNetworks" "go.importpath."".°jtype.func(net/http.ResponseWriter, *net/http.Request)ÀŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)Ð>"".(*DockerServer).listNetworksà>"".(*DockerServer).listNetworksð*go.string."loadImage"€"go.importpath."".jtype.func(net/http.ResponseWriter, *net/http.Request) Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)°8"".(*DockerServer).loadImageÀ8"".(*DockerServer).loadImageÐ.go.string."networkInfo"à"go.importpath."".ðjtype.func(net/http.ResponseWriter, *net/http.Request)€Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)<"".(*DockerServer).networkInfo <"".(*DockerServer).networkInfo°$go.string."notify"À"go.importpath."".Ðntype.func(*github.com/fsouza/go-dockerclient.Container)à’type.func(*"".DockerServer, *github.com/fsouza/go-dockerclient.Container)ð2"".(*DockerServer).notify€2"".(*DockerServer).notify4go.string."pauseContainer" "go.importpath."".°jtype.func(net/http.ResponseWriter, *net/http.Request)ÀŽtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)ÐB"".(*DockerServer).pauseContaineràB"".(*DockerServer).pauseContainerð,go.string."pingDocker"€ "go.importpath."". 
jtype.func(net/http.ResponseWriter, *net/http.Request)  Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)° :"".(*DockerServer).pingDockerÀ :"".(*DockerServer).pingDockerÐ *go.string."pullImage"à "go.importpath."".ð jtype.func(net/http.ResponseWriter, *net/http.Request)€!Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)!8"".(*DockerServer).pullImage !8"".(*DockerServer).pullImage°!*go.string."pushImage"À!"go.importpath."".Ð!jtype.func(net/http.ResponseWriter, *net/http.Request)à!Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)ð!8"".(*DockerServer).pushImage€"8"".(*DockerServer).pushImage"6go.string."removeContainer" ""go.importpath."".°"jtype.func(net/http.ResponseWriter, *net/http.Request)À"Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)Ð"D"".(*DockerServer).removeContainerà"D"".(*DockerServer).removeContainerð".go.string."removeImage"€#"go.importpath."".#jtype.func(net/http.ResponseWriter, *net/http.Request) #Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)°#<"".(*DockerServer).removeImageÀ#<"".(*DockerServer).removeImageÐ#6go.string."renameContainer"à#"go.importpath."".ð#jtype.func(net/http.ResponseWriter, *net/http.Request)€$Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)$D"".(*DockerServer).renameContainer $D"".(*DockerServer).renameContainer°$>go.string."resizeExecContainer"À$"go.importpath."".Ð$jtype.func(net/http.ResponseWriter, *net/http.Request)à$Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)ð$L"".(*DockerServer).resizeExecContainer€%L"".(*DockerServer).resizeExecContainer%4go.string."startContainer" %"go.importpath."".°%jtype.func(net/http.ResponseWriter, *net/http.Request)À%Žtype.func(*"".DockerServer, net/http.ResponseWriter, 
*net/http.Request)Ð%B"".(*DockerServer).startContainerà%B"".(*DockerServer).startContainerð%"".(*DockerServer).topContainerÀ)>"".(*DockerServer).topContainerÐ)8go.string."unpauseContainer"à)"go.importpath."".ð)jtype.func(net/http.ResponseWriter, *net/http.Request)€*Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)*F"".(*DockerServer).unpauseContainer *F"".(*DockerServer).unpauseContainer°*2go.string."waitContainer"À*"go.importpath."".Ð*jtype.func(net/http.ResponseWriter, *net/http.Request)à*Žtype.func(*"".DockerServer, net/http.ResponseWriter, *net/http.Request)ð*@"".(*DockerServer).waitContainer€+@"".(*DockerServer).waitContainerþ,0type..gc."".DockerServer*þ8type..gcprog."".DockerServer(–UUV¥U­j©•þ@go.string."testing.DockerServer"PJtesting.DockerServer @go.string."testing.DockerServer"þ,go.string."containers"@6 +containers ,go.string."containers"þ"go.string."execs"0,execs "go.string."execs"þ&go.string."execMut"00execMut &go.string."execMut"þ go.string."cMut"0*cMut go.string."cMut"þ$go.string."images"0.images $go.string."images"þ go.string."iMut"0*iMut go.string."iMut"þ$go.string."imgIDs"0.imgIDs $go.string."imgIDs"þ(go.string."networks"@2networks (go.string."networks"þ$go.string."netMut"0.netMut $go.string."netMut"þ(go.string."listener"@2listener (go.string."listener"þgo.string."mux"0(mux go.string."mux"þ go.string."hook"0*hook go.string."hook"þ(go.string."failures"@2failures (go.string."failures"þ2go.string."multiFailures"@< multiFailures 2go.string."multiFailures"þ2go.string."execCallbacks"@< execCallbacks 2go.string."execCallbacks"þ4go.string."statsCallbacks"@>statsCallbacks 4go.string."statsCallbacks"þ4go.string."customHandlers"@>customHandlers 4go.string."customHandlers"þ0go.string."handlerMutex"@: handlerMutex 0go.string."handlerMutex"þ"go.string."cChan"0,cChan "go.string."cChan"þ0go.string."DockerServer"@: DockerServer 0go.string."DockerServer"þ(type."".DockerServer€€@Ö +Y0H`x˜°ÈØàèð 8ˆ à 
runtime.algarray00type..gc."".DockerServer@8type..gcprog."".DockerServerP@go.string."testing.DockerServer"p*type.*"".DockerServer€"runtime.zerovalueÀ(type."".DockerServerÀ,go.string."containers"Ð"go.importpath."".àftype.[]*github.com/fsouza/go-dockerclient.Container"go.string."execs" "go.importpath."".°jtype.[]*github.com/fsouza/go-dockerclient.ExecInspectà&go.string."execMut"ð"go.importpath."".€"type.sync.RWMutex° go.string."cMut"À"go.importpath."".Ð"type.sync.RWMutex€$go.string."images""go.importpath."". \type.[]github.com/fsouza/go-dockerclient.ImageÐ go.string."iMut"à"go.importpath."".ð"type.sync.RWMutex $go.string."imgIDs"°"go.importpath."".À,type.map[string]stringð(go.string."networks"€"go.importpath."".btype.[]*github.com/fsouza/go-dockerclient.NetworkÀ$go.string."netMut"Ð"go.importpath."".à"type.sync.RWMutex(go.string."listener" "go.importpath."".°"type.net.Listeneràgo.string."mux"ð"go.importpath."".€œtype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Router° go.string."hook"À"go.importpath."".Ð8type.func(*net/http.Request)€ (go.string."failures" "go.importpath."".  ,type.map[string]stringÐ 2go.string."multiFailures"à "go.importpath."".ð 0type.[]map[string]string  +2go.string."execCallbacks"° +"go.importpath."".À +,type.map[string]func()ð +4go.string."statsCallbacks"€ "go.importpath."". 
ˆtype.map[string]func(string) github.com/fsouza/go-dockerclient.StatsÀ 4go.string."customHandlers"Ð "go.importpath."".à @type.map[string]net/http.Handler 0go.string."handlerMutex"  "go.importpath."".° "type.sync.RWMutexà "go.string."cChan"ð "go.importpath."".€ ptype.chan<- *github.com/fsouza/go-dockerclient.Container`° (type."".DockerServer° 0go.string."DockerServer"À "go.importpath."".Ð €(type."".DockerServerþ0go.string."[]*mux.Route"@: []*mux.Route 0go.string."[]*mux.Route"þžtype.[]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route  HN©   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P0go.string."[]*mux.Route"p°go.weak.type.*[]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovalueštype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþÆgo.typelink.[]*mux.Route/[]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Routežtype.[]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Routeþ2go.string."[8]*mux.Route"@< [8]*mux.Route 2go.string."[8]*mux.Route"þ type.[8]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteÀÀ@OŒ`  runtime.algarray0bruntime.gcbits.0x88888888000000000000000000000000P2go.string."[8]*mux.Route"p²go.weak.type.*[8]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovalueštype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route žtype.[]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþÊgo.typelink.[8]*mux.Route/[8]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route type.[8]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþRgo.string."*map.bucket[string]*mux.Route"`\*map.bucket[string]*mux.Route Rgo.string."*map.bucket[string]*mux.Route"þÀtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route  ®!Om6   
runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."*map.bucket[string]*mux.Route"pÒgo.weak.type.**map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovalue¾type.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþPgo.string."map.bucket[string]*mux.Route"`Zmap.bucket[string]*mux.Route Pgo.string."map.bucket[string]*mux.Route"þ¾type.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route°°ÐŒ‹ùˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484848488888888000000PPgo.string."map.bucket[string]*mux.Route"pÐgo.weak.type.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovalueÀ¾type.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteÀ go.string."keys"àtype.[8]string$go.string."values"° type.[8]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Routeà(go.string."overflow"€Àtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþJgo.string."map.hdr[string]*mux.Route"`Tmap.hdr[string]*mux.Route Jgo.string."map.hdr[string]*mux.Route"þ¸type.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Routeàà0 ÿ  à runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PJgo.string."map.hdr[string]*mux.Route"pÊgo.weak.type.*map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovalueÀ¸type.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteÀ&go.string."buckets"àÀtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route,go.string."oldbuckets"°Àtype.*map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþBgo.string."map[string]*mux.Route"PLmap[string]*mux.Route 
Bgo.string."map[string]*mux.Route"þ°type.map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteÜÜé´UÏ5Ð € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."map[string]*mux.Route"pÂgo.weak.type.*map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route€"runtime.zerovaluetype.string štype.*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route°¾type.map.bucket[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteÀ¸type.map.hdr[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Routeþêgo.typelink.map[string]*mux.Route/map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.Route°type.map[string]*github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux.RouteþTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ0type..hashfunc.[1]string(type..hash.[1]stringþ,type..eqfunc.[1]string$type..eq.[1]stringþ&type..alg.[1]string 0type..hashfunc.[1]string,type..eqfunc.[1]stringþbruntime.gcbits.0x48000000000000000000000000000000 Hþ*go.string."[1]string"@4 [1]string *go.string."[1]string"þtype.[1]stringÀÀĸb  &type..alg.[1]string0bruntime.gcbits.0x48000000000000000000000000000000P*go.string."[1]string"p.go.weak.type.*[1]string€"runtime.zerovaluetype.string type.[]stringþ>go.typelink.[1]string/[1]stringtype.[1]stringþbruntime.gcbits.0x84000000000000000000000000000000 „þrgo.string."struct { F uintptr; R *testing.DockerServer }"€|-struct { F uintptr; R *testing.DockerServer } rgo.string."struct { F uintptr; R *testing.DockerServer }"þgo.string."F"0$F go.string."F"þgo.string."R"0$R go.string."R"þZtype.struct { F uintptr; R *"".DockerServer }ààÝ|(I À runtime.algarray0bruntime.gcbits.0x84000000000000000000000000000000Prgo.string."struct { F uintptr; R *testing.DockerServer 
}"plgo.weak.type.*struct { F uintptr; R *"".DockerServer }€"runtime.zerovalueÀZtype.struct { F uintptr; R *"".DockerServer }Àgo.string."F"àtype.uintptrgo.string."R"°*type.*"".DockerServerþ,go.string."*[1]string"@6 +*[1]string ,go.string."*[1]string"þtype.*[1]string  l.!ä6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[1]string"p0go.weak.type.**[1]string€"runtime.zerovaluetype.[1]stringþtgo.string."*struct { F uintptr; R *testing.DockerServer }"€~.*struct { F uintptr; R *testing.DockerServer } tgo.string."*struct { F uintptr; R *testing.DockerServer }"þ\type.*struct { F uintptr; R *"".DockerServer }  ƒ²B6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ptgo.string."*struct { F uintptr; R *testing.DockerServer }"pngo.weak.type.**struct { F uintptr; R *"".DockerServer }€"runtime.zerovalueZtype.struct { F uintptr; R *"".DockerServer }þ^runtime.gcbits.0x000000000000000000000000000000 þ@go.string."[0]map[string]string"PJ[0]map[string]string @go.string."[0]map[string]string"þ2type.[0]map[string]stringÀÀ±l}ó‘ € runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P@go.string."[0]map[string]string"pDgo.weak.type.*[0]map[string]string€"runtime.zerovalue,type.map[string]string 0type.[]map[string]stringþjgo.typelink.[0]map[string]string/[0]map[string]string2type.[0]map[string]stringþBgo.string."*[0]map[string]string"PL*[0]map[string]string Bgo.string."*[0]map[string]string"þ4type.*[0]map[string]string  õr<±6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PBgo.string."*[0]map[string]string"pFgo.weak.type.**[0]map[string]string€"runtime.zerovalue2type.[0]map[string]stringþgo.typelink.[]uintptr/[]uintptrtype.[]uintptrþ,go.string."[4]uintptr"@6 +[4]uintptr ,go.string."[4]uintptr"þtype.[4]uintptrÀÀ l<‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P,go.string."[4]uintptr"p0go.weak.type.*[4]uintptr€"runtime.zerovaluetype.uintptr 
type.[]uintptrþBgo.typelink.[4]uintptr/[4]uintptrtype.[4]uintptrþbruntime.gcbits.0x88888844440000000000000000000000 ˆˆˆDDþPgo.string."map.iter[string]http.Handler"`Zmap.iter[string]http.Handler Pgo.string."map.iter[string]http.Handler"þgo.string."key"0(key go.string."key"þgo.string."val"0(val go.string."val"þgo.string."h"0$h go.string."h"þ go.string."bptr"0*bptr go.string."bptr"þ"go.string."other"0,other "go.string."other"þJtype.map.iter[string]net/http.HandlerððP»  (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PPgo.string."map.iter[string]http.Handler"p\go.weak.type.*map.iter[string]net/http.Handler€"runtime.zerovalueÀJtype.map.iter[string]net/http.HandlerÀgo.string."key"àtype.*stringgo.string."val"°,type.*net/http.Handleràgo.string."t"€type.*uint8°go.string."h"ÐJtype.*map.hdr[string]net/http.Handler€&go.string."buckets" Ptype.*map.bucket[string]net/http.HandlerÐ go.string."bptr"ðPtype.*map.bucket[string]net/http.Handler "go.string."other"Àtype.[4]uintptrþDgo.string."**testing.DockerServer"PN**testing.DockerServer Dgo.string."**testing.DockerServer"þ,type.**"".DockerServer  Ep¯6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."**testing.DockerServer"p>go.weak.type.***"".DockerServer€"runtime.zerovalue*type.*"".DockerServerþjgo.string."*func(http.ResponseWriter, *http.Request)"€t)*func(http.ResponseWriter, *http.Request) jgo.string."*func(http.ResponseWriter, *http.Request)"þltype.*func(net/http.ResponseWriter, *net/http.Request)  ö'~P6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Pjgo.string."*func(http.ResponseWriter, *http.Request)"p~go.weak.type.**func(net/http.ResponseWriter, *net/http.Request)€"runtime.zerovaluejtype.func(net/http.ResponseWriter, *net/http.Request)þbruntime.gcbits.0x84488800000000000000000000000000 „HˆþÒgo.string."struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"àÜ]struct { F uintptr; A0 **testing.DockerServer; A1 
*func(http.ResponseWriter, *http.Request) } Ògo.string."struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"þgo.string."A0"0&A0 go.string."A0"þgo.string."A1"0&A1 go.string."A1"þÊtype.struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }°°v$À(  runtime.algarray0bruntime.gcbits.0x84488800000000000000000000000000PÒgo.string."struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"pÜgo.weak.type.*struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }€"runtime.zerovalueÀÊtype.struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }Àgo.string."F"àtype.uintptrgo.string."A0"°,type.**"".DockerServeràgo.string."A1"€ltype.*func(net/http.ResponseWriter, *net/http.Request)þÔgo.string."*struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"àÞ^*struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) } Ôgo.string."*struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"þÌtype.*struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }  ”r‡À6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PÔgo.string."*struct { F uintptr; A0 **testing.DockerServer; A1 *func(http.ResponseWriter, *http.Request) }"pÞgo.weak.type.**struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }€"runtime.zerovalueÊtype.struct { F uintptr; A0 **"".DockerServer; A1 *func(net/http.ResponseWriter, *net/http.Request) }þDgo.string."[]docker.APIContainers"PN[]docker.APIContainers Dgo.string."[]docker.APIContainers"þltype.[]github.com/fsouza/go-dockerclient.APIContainers  Ò   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PDgo.string."[]docker.APIContainers"p~go.weak.type.*[]github.com/fsouza/go-dockerclient.APIContainers€"runtime.zerovaluehtype.github.com/fsouza/go-dockerclient.APIContainersþ¨go.typelink.[]docker.APIContainers/[]github.com/fsouza/go-dockerclient.APIContainersltype.[]github.com/fsouza/go-dockerclient.APIContainersþbruntime.gcbits.0xcc000000000000000000000000000000 Ìþ0go.string."interface {}"@: interface {} 0go.string."interface {}"þ"type.interface {}ÀÀçW  € runtime.algarray0bruntime.gcbits.0xcc000000000000000000000000000000P0go.string."interface {}"p4go.weak.type.*interface {}€"runtime.zerovalueÀ"type.interface {}þ4go.string."[]interface {}"@>[]interface {} 4go.string."[]interface {}"þ&type.[]interface {}  p“ê/   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P4go.string."[]interface {}"p8go.weak.type.*[]interface {}€"runtime.zerovalue"type.interface {}þRgo.typelink.[]interface {}/[]interface {}&type.[]interface {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·44568aa369055d8938d809aa5d80843bþTgclocals·9c703c5c7b9c1932c840b69f8ebce236þ[8]docker.Port 4go.string."[8]docker.Port"þ\type.[8]github.com/fsouza/go-dockerclient.PortÀÀ€~/(ã ftype..alg.[8]github.com/fsouza/go-dockerclient.Port0bruntime.gcbits.0x48484848484848480000000000000000P4go.string."[8]docker.Port"pngo.weak.type.*[8]github.com/fsouza/go-dockerclient.Port€"runtime.zerovalueVtype.github.com/fsouza/go-dockerclient.Port Ztype.[]github.com/fsouza/go-dockerclient.Portþˆgo.typelink.[8]docker.Port/[8]github.com/fsouza/go-dockerclient.Port\type.[8]github.com/fsouza/go-dockerclient.PortþDgo.string."[][]docker.PortBinding"PN[][]docker.PortBinding Dgo.string."[][]docker.PortBinding"þltype.[][]github.com/fsouza/go-dockerclient.PortBinding  q†Ç¢   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000PDgo.string."[][]docker.PortBinding"p~go.weak.type.*[][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovaluehtype.[]github.com/fsouza/go-dockerclient.PortBindingþ¨go.typelink.[][]docker.PortBinding/[][]github.com/fsouza/go-dockerclient.PortBindingltype.[][]github.com/fsouza/go-dockerclient.PortBindingþbruntime.gcbits.0x48844448844448844448844400000000 H„DH„DH„DH„DþFgo.string."[8][]docker.PortBinding"PP[8][]docker.PortBinding Fgo.string."[8][]docker.PortBinding"þntype.[8][]github.com/fsouza/go-dockerclient.PortBindingÀÀÀ (Øh à runtime.algarray0bruntime.gcbits.0x48844448844448844448844400000000PFgo.string."[8][]docker.PortBinding"p€go.weak.type.*[8][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovaluehtype.[]github.com/fsouza/go-dockerclient.PortBinding ltype.[][]github.com/fsouza/go-dockerclient.PortBindingþ¬go.typelink.[8][]docker.PortBinding/[8][]github.com/fsouza/go-dockerclient.PortBindingntype.[8][]github.com/fsouza/go-dockerclient.PortBindingþpgo.string."*map.bucket[docker.Port][]docker.PortBinding"€z,*map.bucket[docker.Port][]docker.PortBinding pgo.string."*map.bucket[docker.Port][]docker.PortBinding"þÎtype.*map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding  ä´~6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000Ppgo.string."*map.bucket[docker.Port][]docker.PortBinding"pàgo.weak.type.**map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovalueÌtype.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingþ,Ôtype..gc.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding,þÜtype..gcprog.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding*™™™™Y–eY–e 
þngo.string."map.bucket[docker.Port][]docker.PortBinding"€x+map.bucket[docker.Port][]docker.PortBinding ngo.string."map.bucket[docker.Port][]docker.PortBinding"þÌtype.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding°°P”þr€YˆH à runtime.algarray0Ôtype..gc.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding@Ütype..gcprog.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingPngo.string."map.bucket[docker.Port][]docker.PortBinding"pÞgo.weak.type.*map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovalueÀÌtype.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingÀ go.string."keys"à\type.[8]github.com/fsouza/go-dockerclient.Port$go.string."values"°ntype.[8][]github.com/fsouza/go-dockerclient.PortBindingà(go.string."overflow"€Îtype.*map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingþhgo.string."map.hdr[docker.Port][]docker.PortBinding"€r(map.hdr[docker.Port][]docker.PortBinding hgo.string."map.hdr[docker.Port][]docker.PortBinding"þÆtype.map.hdr[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingàà09ÈhÆ  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000Phgo.string."map.hdr[docker.Port][]docker.PortBinding"pØgo.weak.type.*map.hdr[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovalueÀÆtype.map.hdr[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingÀ&go.string."buckets"àÎtype.*map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding,go.string."oldbuckets"°Îtype.*map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingþ`go.string."map[docker.Port][]docker.PortBinding"pj$map[docker.Port][]docker.PortBinding `go.string."map[docker.Port][]docker.PortBinding"þ¾type.map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingÜÜ 5P € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P`go.string."map[docker.Port][]docker.PortBinding"pÐgo.weak.type.*map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovalueVtype.github.com/fsouza/go-dockerclient.Port htype.[]github.com/fsouza/go-dockerclient.PortBinding°Ìtype.map.bucket[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingÀÆtype.map.hdr[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingþ–go.typelink.map[docker.Port][]docker.PortBinding/map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBinding¾type.map[github.com/fsouza/go-dockerclient.Port][]github.com/fsouza/go-dockerclient.PortBindingþ*go.string."struct {}"@4 struct {} *go.string."struct {}"þtype.struct {}ÀÀ¬ö'™  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."struct {}"p.go.weak.type.*struct {}€"runtime.zerovalueÀtype.struct {}þ.go.string."[]struct {}"@8 []struct {} .go.string."[]struct {}"þ type.[]struct {}  ºÌ¥…   
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P.go.string."[]struct {}"p2go.weak.type.*[]struct {}€"runtime.zerovaluetype.struct {}þFgo.typelink.[]struct {}/[]struct {} type.[]struct {}þ0go.string."[8]struct {}"@: [8]struct {} 0go.string."[8]struct {}"þ"type.[8]struct {}ÀÀ>ƒy ‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P0go.string."[8]struct {}"p4go.weak.type.*[8]struct {}€"runtime.zerovaluetype.struct {}  type.[]struct {}þJgo.typelink.[8]struct {}/[8]struct {}"type.[8]struct {}þZgo.string."*map.bucket[docker.Port]struct {}"pd!*map.bucket[docker.Port]struct {} Zgo.string."*map.bucket[docker.Port]struct {}"þ‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}  1S6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PZgo.string."*map.bucket[docker.Port]struct {}"p”go.weak.type.**map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalue€type.map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}þbruntime.gcbits.0x84848484848484848400000000000000 „„„„„„„„„þXgo.string."map.bucket[docker.Port]struct {}"pb map.bucket[docker.Port]struct {} Xgo.string."map.bucket[docker.Port]struct {}"þ€type.map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}°°±(눈 à runtime.algarray0bruntime.gcbits.0x84848484848484848400000000000000PXgo.string."map.bucket[docker.Port]struct {}"p’go.weak.type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalueÀ€type.map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}À go.string."keys"à\type.[8]github.com/fsouza/go-dockerclient.Port$go.string."values"°"type.[8]struct {}à(go.string."overflow"€‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}þRgo.string."map.hdr[docker.Port]struct {}"`\map.hdr[docker.Port]struct {} Rgo.string."map.hdr[docker.Port]struct {}"þztype.map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}àà0š¨†  à 
runtime.algarray0bruntime.gcbits.0x44844800000000000000000000000000PRgo.string."map.hdr[docker.Port]struct {}"pŒgo.weak.type.*map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalueÀztype.map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}À&go.string."buckets"à‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {},go.string."oldbuckets"°‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}þJgo.string."map[docker.Port]struct {}"`Tmap[docker.Port]struct {} Jgo.string."map[docker.Port]struct {}"þrtype.map[github.com/fsouza/go-dockerclient.Port]struct {}ÜÜ'Ê75 € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PJgo.string."map[docker.Port]struct {}"p„go.weak.type.*map[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalueVtype.github.com/fsouza/go-dockerclient.Port type.struct {}°€type.map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}Àztype.map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}þ´go.typelink.map[docker.Port]struct {}/map[github.com/fsouza/go-dockerclient.Port]struct {}rtype.map[github.com/fsouza/go-dockerclient.Port]struct {}þTgclocals·3280bececceccd33cb74587feedb1f9fþTgclocals·2dc77d960dd3e4b3de2361f9cbd75783þTgclocals·65526a5f07004f02424fe51b799cdd23  +þTgclocals·fa7203fd5ed88aea99b7be572f707eb0 þ~type..hashfunc.[1]github.com/fsouza/go-dockerclient.PortBindingvtype..hash.[1]github.com/fsouza/go-dockerclient.PortBindingþztype..eqfunc.[1]github.com/fsouza/go-dockerclient.PortBindingrtype..eq.[1]github.com/fsouza/go-dockerclient.PortBindingþttype..alg.[1]github.com/fsouza/go-dockerclient.PortBinding ~type..hashfunc.[1]github.com/fsouza/go-dockerclient.PortBindingztype..eqfunc.[1]github.com/fsouza/go-dockerclient.PortBindingþbruntime.gcbits.0x48480000000000000000000000000000 HHþBgo.string."[1]docker.PortBinding"PL[1]docker.PortBinding Bgo.string."[1]docker.PortBinding"þjtype.[1]github.com/fsouza/go-dockerclient.PortBindingÀÀ ·ñƒ‘ 
ttype..alg.[1]github.com/fsouza/go-dockerclient.PortBinding0bruntime.gcbits.0x48480000000000000000000000000000PBgo.string."[1]docker.PortBinding"p|go.weak.type.*[1]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovaluedtype.github.com/fsouza/go-dockerclient.PortBinding htype.[]github.com/fsouza/go-dockerclient.PortBindingþ¤go.typelink.[1]docker.PortBinding/[1]github.com/fsouza/go-dockerclient.PortBindingjtype.[1]github.com/fsouza/go-dockerclient.PortBindingþ@go.string."struct { ID string }"PJstruct { ID string } @go.string."struct { ID string }"þgo.string."ID"0&ID go.string."ID"þ2type.struct { ID string }KÔçˆ À runtime.algarray0bruntime.gcbits.0x48000000000000000000000000000000P@go.string."struct { ID string }"pDgo.weak.type.*struct { ID string }€"runtime.zerovalueÀ2type.struct { ID string }Àgo.string."ID"àtype.stringþ,go.string."*struct {}"@6 +*struct {} ,go.string."*struct {}"þtype.*struct {}  J$©å6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*struct {}"p0go.weak.type.**struct {}€"runtime.zerovaluetype.struct {}þTgo.string."*map.hdr[docker.Port]struct {}"`^*map.hdr[docker.Port]struct {} Tgo.string."*map.hdr[docker.Port]struct {}"þ|type.*map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}  5¡BN6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PTgo.string."*map.hdr[docker.Port]struct {}"pŽgo.weak.type.**map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalueztype.map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}þTgo.string."map.iter[docker.Port]struct {}"`^map.iter[docker.Port]struct {} Tgo.string."map.iter[docker.Port]struct {}"þ|type.map.iter[github.com/fsouza/go-dockerclient.Port]struct {}ððPótÏ… (0( à runtime.algarray0bruntime.gcbits.0x88888844440000000000000000000000PTgo.string."map.iter[docker.Port]struct {}"pŽgo.weak.type.*map.iter[github.com/fsouza/go-dockerclient.Port]struct {}€"runtime.zerovalueÀ|type.map.iter[github.com/fsouza/go-dockerclient.Port]struct 
{}Àgo.string."key"àXtype.*github.com/fsouza/go-dockerclient.Portgo.string."val"°type.*struct {}àgo.string."t"€type.*uint8°go.string."h"Ð|type.*map.hdr[github.com/fsouza/go-dockerclient.Port]struct {}€&go.string."buckets" ‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {}Ð go.string."bptr"ð‚type.*map.bucket[github.com/fsouza/go-dockerclient.Port]struct {} "go.string."other"Àtype.[4]uintptrþDgo.string."*[1]docker.PortBinding"PN*[1]docker.PortBinding Dgo.string."*[1]docker.PortBinding"þltype.*[1]github.com/fsouza/go-dockerclient.PortBinding  ÇÊß:6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."*[1]docker.PortBinding"p~go.weak.type.**[1]github.com/fsouza/go-dockerclient.PortBinding€"runtime.zerovaluejtype.[1]github.com/fsouza/go-dockerclient.PortBindingþ&go.string."[]uint8"00[]uint8 &go.string."[]uint8"þtype.[]uint8  ß~.8   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P&go.string."[]uint8"p*go.weak.type.*[]uint8€"runtime.zerovaluetype.uint8þ6go.typelink.[]uint8/[]uint8type.[]uint8þ*go.string."[16]uint8"@4 [16]uint8 *go.string."[16]uint8"þtype.[16]uint8ÀÀ·}5G‘ À runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P*go.string."[16]uint8"p.go.weak.type.*[16]uint8€"runtime.zerovaluetype.uint8 type.[]uint8þ>go.typelink.[16]uint8/[16]uint8type.[16]uint8þ,go.string."*[16]uint8"@6 +*[16]uint8 ,go.string."*[16]uint8"þtype.*[16]uint8  ´/ Q6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[16]uint8"p0go.weak.type.**[16]uint8€"runtime.zerovaluetype.[16]uint8þ,go.string."[][]string"@6 +[][]string ,go.string."[][]string"þtype.[][]string  ¼:è   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P,go.string."[][]string"p0go.weak.type.*[][]string€"runtime.zerovaluetype.[]stringþBgo.typelink.[][]string/[][]stringtype.[][]stringþ.go.string."[1][]string"@8 [1][]string .go.string."[1][]string"þ type.[1][]stringÀÀ©y=Y à 
runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P.go.string."[1][]string"p2go.weak.type.*[1][]string€"runtime.zerovaluetype.[]string type.[][]stringþFgo.typelink.[1][]string/[1][]string type.[1][]stringþ,go.string."*[8]string"@6 +*[8]string ,go.string."*[8]string"þtype.*[8]string  ­”o6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P,go.string."*[8]string"p0go.weak.type.**[8]string€"runtime.zerovaluetype.[8]stringþ0go.string."*[1][]string"@: *[1][]string 0go.string."*[1][]string"þ"type.*[1][]string  ö^’6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P0go.string."*[1][]string"p4go.weak.type.**[1][]string€"runtime.zerovalue type.[1][]stringþ"go.string."[]int"0,[]int "go.string."[]int"þtype.[]int  Žfù   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P"go.string."[]int"p&go.weak.type.*[]int€"runtime.zerovaluetype.intþ.go.typelink.[]int/[]inttype.[]intþ$go.string."[8]int"0.[8]int $go.string."[8]int"þtype.[8]intÀÀ@–™Õ‘  runtime.algarray0^runtime.gcbits.0x000000000000000000000000000000P$go.string."[8]int"p(go.weak.type.*[8]int€"runtime.zerovaluetype.int type.[]intþ2go.typelink.[8]int/[8]inttype.[8]intþDgo.string."*map.bucket[string]int"PN*map.bucket[string]int Dgo.string."*map.bucket[string]int"þ6type.*map.bucket[string]int  ɾ̜6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PDgo.string."*map.bucket[string]int"pHgo.weak.type.**map.bucket[string]int€"runtime.zerovalue4type.map.bucket[string]intþbruntime.gcbits.0x84848484848484844444444484000000 „„„„„„„„DDDD„þBgo.string."map.bucket[string]int"PLmap.bucket[string]int Bgo.string."map.bucket[string]int"þ4type.map.bucket[string]int°°Ð]hcqˆÈ à runtime.algarray0bruntime.gcbits.0x84848484848484844444444484000000PBgo.string."map.bucket[string]int"pFgo.weak.type.*map.bucket[string]int€"runtime.zerovalueÀ4type.map.bucket[string]intÀ 
go.string."keys"àtype.[8]string$go.string."values"°type.[8]intà(go.string."overflow"€6type.*map.bucket[string]intþmap[string]int 4go.string."map[string]int"þ&type.map[string]intÜÜåÛÈJ5Ð € runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P4go.string."map[string]int"p8go.weak.type.*map[string]int€"runtime.zerovaluetype.string type.int°4type.map.bucket[string]intÀ.type.map.hdr[string]intþRgo.typelink.map[string]int/map[string]int&type.map[string]intþ*go.string."[][]uint8"@4 [][]uint8 *go.string."[][]uint8"þtype.[][]uint8  õ}ï   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P*go.string."[][]uint8"p.go.weak.type.*[][]uint8€"runtime.zerovaluetype.[]uint8þ>go.typelink.[][]uint8/[][]uint8type.[][]uint8þ(go.string."*[]uint8"@2*[]uint8 (go.string."*[]uint8"þtype.*[]uint8  ¥ŽÐi6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P(go.string."*[]uint8"p,go.weak.type.**[]uint8€"runtime.zerovaluetype.[]uint8þ&go.string."*func()"00*func() &go.string."*func()"þtype.*func()  ›u6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P&go.string."*func()"p*go.weak.type.**func()€"runtime.zerovaluetype.func()þ@go.string."**docker.ExecInspect"PJ**docker.ExecInspect @go.string."**docker.ExecInspect"þhtype.**github.com/fsouza/go-dockerclient.ExecInspect  V¤¿ô6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P@go.string."**docker.ExecInspect"pzgo.weak.type.***github.com/fsouza/go-dockerclient.ExecInspect€"runtime.zerovalueftype.*github.com/fsouza/go-dockerclient.ExecInspectþ8go.string."**docker.Network"PB**docker.Network 8go.string."**docker.Network"þ`type.**github.com/fsouza/go-dockerclient.Network  èbS6   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000P8go.string."**docker.Network"prgo.weak.type.***github.com/fsouza/go-dockerclient.Network€"runtime.zerovalue^type.*github.com/fsouza/go-dockerclient.Networkþ8go.string."[]docker.Network"PB[]docker.Network 
8go.string."[]docker.Network"þ`type.[]github.com/fsouza/go-dockerclient.Network  »&HÕ   runtime.algarray0bruntime.gcbits.0x48844400000000000000000000000000P8go.string."[]docker.Network"prgo.weak.type.*[]github.com/fsouza/go-dockerclient.Network€"runtime.zerovalue\type.github.com/fsouza/go-dockerclient.Networkþgo.typelink.[]docker.Network/[]github.com/fsouza/go-dockerclient.Network`type.[]github.com/fsouza/go-dockerclient.NetworkþRgo.string."**docker.CreateNetworkOptions"`\**docker.CreateNetworkOptions Rgo.string."**docker.CreateNetworkOptions"þztype.**github.com/fsouza/go-dockerclient.CreateNetworkOptions  Õ—36   runtime.algarray0bruntime.gcbits.0x88000000000000000000000000000000PRgo.string."**docker.CreateNetworkOptions"pŒgo.weak.type.***github.com/fsouza/go-dockerclient.CreateNetworkOptions€"runtime.zerovaluextype.*github.com/fsouza/go-dockerclient.CreateNetworkOptionsþtype..eq.[8]net/http.Handler·f8type..eq.[8]net/http.Handlerþ$runtime.ifaceeq·fruntime.ifaceeqþ.type..hash.[1]string·f(type..hash.[1]stringþ*type..eq.[1]string·f$type..eq.[1]stringþ:type..hash.[2]interface {}·f4type..hash.[2]interface {}þ.runtime.nilinterhash·f(runtime.nilinterhashþ6type..eq.[2]interface {}·f0type..eq.[2]interface {}þ$runtime.efaceeq·fruntime.efaceeqþ:type..hash.[1]interface {}·f4type..hash.[1]interface {}þ6type..eq.[1]interface {}·f0type..eq.[1]interface {}þntype..hash.[8]github.com/fsouza/go-dockerclient.Port·fhtype..hash.[8]github.com/fsouza/go-dockerclient.Portþjtype..eq.[8]github.com/fsouza/go-dockerclient.Port·fdtype..eq.[8]github.com/fsouza/go-dockerclient.Portþ|type..hash.[1]github.com/fsouza/go-dockerclient.PortBinding·fvtype..hash.[1]github.com/fsouza/go-dockerclient.PortBindingþvtype..hash.github.com/fsouza/go-dockerclient.PortBinding·fptype..hash.github.com/fsouza/go-dockerclient.PortBindingþxtype..eq.[1]github.com/fsouza/go-dockerclient.PortBinding·frtype..eq.[1]github.com/fsouza/go-dockerclient.PortBindingþ"runtime.zerovalue0ÿÿgo13ld \ No newline at end of file diff 
--git a/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore new file mode 100644 index 000000000..3591f9ff3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.test + +# Folders +_obj +_test +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml b/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml new file mode 100644 index 000000000..a9e5cc3cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml @@ -0,0 +1,41 @@ +language: go +go: +- 1.3.3 +- 1.4.2 + +env: + global: + - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 + - TOXIPROXY_ADDR=http://localhost:8474 + - KAFKA_INSTALL_ROOT=/home/travis/kafka + - KAFKA_HOSTNAME=localhost + - DEBUG=true + matrix: + - KAFKA_VERSION=0.8.1.1 + - KAFKA_VERSION=0.8.2.1 + +before_install: +- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} +- vagrant/install_cluster.sh +- vagrant/boot_cluster.sh +- vagrant/create_topics.sh + + +install: +- make install_dependencies + +script: +- make test +- make vet +- make errcheck +- make fmt + +matrix: + include: + - go: tip + env: KAFKA_VERSION=0.8.2.1 + allow_failures: + - go: tip + fast_finish: true + +sudo: false diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md new file mode 100644 index 000000000..5bea6bc3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md @@ -0,0 +1,157 @@ +# Changelog + +#### Version 1.5.0 (unreleased) + +New Features: + - TLS-encrypted network connections are now supported. 
This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/Shopify/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/Shopify/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/Shopify/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/Shopify/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/Shopify/sarama/pull/475)). + +#### Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/Shopify/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). + +#### Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/Shopify/sarama/pull/456)). + +#### Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/Shopify/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/Shopify/sarama/pull/450), + [#451](https://github.com/Shopify/sarama/pull/451)). 
+ +#### Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/Shopify/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/Shopify/sarama/pull/439), + [#442](https://github.com/Shopify/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/Shopify/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/Shopify/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/Shopify/sarama/pull/325)). + +#### Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
+ This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/Shopify/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/Shopify/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/Shopify/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/Shopify/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/Shopify/sarama/pull/422)). + +#### Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/Shopify/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/Shopify/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/Shopify/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/Shopify/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/Shopify/sarama/issues/407)). 
+ +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/Shopify/sarama/pull/390), + [#400](https://github.com/Shopify/sarama/pull/400)). + +#### Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/Shopify/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/Shopify/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/Shopify/sarama/pull/369)). + - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). + + +#### Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. +- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. +- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite. 
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md new file mode 100644 index 000000000..b0f107cbc --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing + +Contributions are always welcome, both reporting issues and submitting pull requests! + +### Reporting issues + +Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. + +- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version. +- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. +- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. + +Also, please include the following information about your environment, so we can help you faster: + +- What version of Kafka are you using? +- What version of Go are you using? +- What are the values of your Producer/Consumer/Client configuration? + + +### Submitting pull requests + +We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following. + +- If you plan to work on something major, please open an issue to discuss the design first. +- Don't break backwards compatibility. If you really have to, open an issue to discuss this first. +- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. 
+- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs. +- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`.You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors. +- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. +- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. +- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE b/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE new file mode 100644 index 000000000..8121b63b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile b/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile new file mode 100644 index 000000000..b76e97a97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile @@ -0,0 +1,24 @@ +default: fmt vet errcheck test + +test: + go test -v -timeout 60s -race ./... + +vet: + go vet ./... + +errcheck: + errcheck github.com/Shopify/sarama/... + +fmt: + @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi + +install_dependencies: install_errcheck install_go_vet get + +install_errcheck: + go get github.com/kisielk/errcheck + +install_go_vet: + go get golang.org/x/tools/cmd/vet + +get: + go get -t diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/README.md new file mode 100644 index 000000000..486372730 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/README.md @@ -0,0 +1,31 @@ +sarama +====== + +[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) +[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) + +Sarama is an MIT-licensed Go client library for Apache Kafka 0.8 (and later). + +### Getting started + +- API documentation and example are available via godoc at https://godoc.org/github.com/Shopify/sarama. +- Mocks for testing are available in the [mocks](./mocks) subpackage. +- The [examples](./examples) directory contains more elaborate example applications. +- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. 
+- There is a google group for Kafka client users and authors at https://groups.google.com/forum/#!forum/kafka-clients + +### Compatibility and API stability + +Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest releases of Kafka +and Go, and we provide a two month grace period for older releases. This means we currently officially +support Go 1.3 and 1.4, and Kafka 0.8.1 and 0.8.2. + +Sarama follows semantic versioning and provides API stability via the gopkg.in service. +You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +A changelog is available [here](CHANGELOG.md). + +### Other + +* [Sarama wiki](https://github.com/Shopify/sarama/wiki) to get started hacking on sarama itself. +* [Kafka Project Home](https://kafka.apache.org/) +* [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile b/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile new file mode 100644 index 000000000..4862dd936 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile @@ -0,0 +1,22 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
+VAGRANTFILE_API_VERSION = "2" + +MEMORY = 3072 + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + + config.vm.provision :shell, path: "vagrant/provision.sh" + + config.vm.network "private_network", ip: "192.168.100.67" + + config.vm.provider "vmware_fusion" do |v| + v.vmx["memsize"] = MEMORY.to_s + end + config.vm.provider "virtualbox" do |v| + v.memory = MEMORY + end +end diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go new file mode 100644 index 000000000..8e229490f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go @@ -0,0 +1,924 @@ +package sarama + +import ( + "fmt" + "sync" + "time" + + "github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. +type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer, flushing any messages it may have + // buffered. The shutdown has completed when both the Errors and Successes channels + // have been closed. When calling AsyncClose, you *must* continue to read from those + // channels in order to drain the results of any messages in flight. + AsyncClose() + + // Close shuts down the producer and flushes any messages it may have buffered. + // You must call this function before a producer object passes out of scope, as + // it may otherwise leak memory. You must call this before calling Close on the + // underlying client. 
+ Close() error + + // Input is the input channel for the user to write messages to that they wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when AckSuccesses is enabled. + // If Return.Successes is true, you MUST read from this channel or the Producer will deadlock. + // It is suggested that you send and read messages together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this channel + // or the Producer will deadlock when the channel is full. Alternatively, you can set + // Producer.Return.Errors in your config to false, which prevents errors to be returned. + Errors() <-chan *ProducerError +} + +type asyncProducer struct { + client Client + conf *Config + ownClient bool + + errors chan *ProducerError + input, successes, retries chan *ProducerMessage + inFlight sync.WaitGroup + + brokers map[*Broker]chan<- *ProducerMessage + brokerRefs map[chan<- *ProducerMessage]int + brokerLock sync.Mutex +} + +// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. +func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + p.(*asyncProducer).ownClient = true + return p, nil +} + +// NewAsyncProducerFromClient creates a new Producer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. 
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]chan<- *ProducerMessage), + brokerRefs: make(map[chan<- *ProducerMessage]int), + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + chaser flagSet = 1 << iota // message is last in a group that failed + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + Key Encoder // The partitioning key for this message. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder. + Value Encoder // The actual message to store in Kafka. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder. + + // These are filled in by the producer as the message is processed + Offset int64 // Offset is the offset of the message stored on the broker. This is only guaranteed to be defined if the message was successfully delivered and RequiredAcks is not NoResponse. + Partition int32 // Partition is the partition that the message was sent to. This is only guaranteed to be defined if the message was successfully delivered. + + Metadata interface{} // This field is used to hold arbitrary data you wish to include so it will be available when receiving on the Successes and Errors channels. 
Sarama completely ignores this field and is only to be used for pass-through data. + + retries int + flags flagSet +} + +func (m *ProducerMessage) byteSize() int { + size := 26 // the metadata overhead of CRC, flags, etc. + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. 
+type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for _ = range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + if (p.conf.Producer.Compression == CompressionNone && msg.Value != nil && msg.Value.Length() > p.conf.Producer.MaxMessageBytes) || + (msg.byteSize() > p.conf.Producer.MaxMessageBytes) { + + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + 
handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} + +// one per topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { + if tp.partitioner.RequiresConsistency() { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + + if err != nil { + return err + } + + numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + 
+ return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + output chan<- *ProducerMessage + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.output = pp.parent.getBrokerProducer(pp.leader) + } + + for msg := range pp.input { + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + } else if pp.highWatermark > 0 { + // we are retrying something (else highWatermark would be 0) but this message is not a *new* 
retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a chaser) + if msg.flags&chaser == chaser { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&chaser == chaser { + // this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + pp.output <- msg + } + + if pp.output != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a chaser so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: 
pp.topic, Partition: pp.partition, flags: chaser, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.output = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.output <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.output = pp.parent.getBrokerProducer(pp.leader) + return nil + }) +} + +// one per broker, constructs both an aggregator and a flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { + input := make(chan *ProducerMessage) + bridge := make(chan []*ProducerMessage) + + a := &aggregator{ + parent: p, + broker: broker, + input: input, + output: bridge, + } + go withRecover(a.run) + + f := &flusher{ + parent: 
p, + broker: broker, + input: bridge, + currentRetries: make(map[string]map[int32]error), + } + go withRecover(f.run) + + return input +} + +// groups messages together into appropriately-sized batches for sending to the broker +// based on https://godoc.org/github.com/eapache/channels#BatchingChannel +type aggregator struct { + parent *asyncProducer + broker *Broker + input <-chan *ProducerMessage + output chan<- []*ProducerMessage + + buffer []*ProducerMessage + bufferBytes int + timer <-chan time.Time +} + +func (a *aggregator) run() { + var output chan<- []*ProducerMessage + + for { + select { + case msg := <-a.input: + if msg == nil { + goto shutdown + } + + if a.wouldOverflow(msg) { + Logger.Printf("producer/aggregator/%d maximum request accumulated, forcing blocking flush\n", a.broker.ID()) + a.output <- a.buffer + a.reset() + output = nil + } + + a.buffer = append(a.buffer, msg) + a.bufferBytes += msg.byteSize() + + if a.readyToFlush(msg) { + output = a.output + } else if a.parent.conf.Producer.Flush.Frequency > 0 && a.timer == nil { + a.timer = time.After(a.parent.conf.Producer.Flush.Frequency) + } + case <-a.timer: + output = a.output + case output <- a.buffer: + a.reset() + output = nil + } + } + +shutdown: + if len(a.buffer) > 0 { + a.output <- a.buffer + } + close(a.output) +} + +func (a *aggregator) wouldOverflow(msg *ProducerMessage) bool { + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case a.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a compressed message-batch? + case a.parent.conf.Producer.Compression != CompressionNone && a.bufferBytes+msg.byteSize() >= a.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? 
+ case a.parent.conf.Producer.Flush.MaxMessages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (a *aggregator) readyToFlush(msg *ProducerMessage) bool { + switch { + // If all three config values are 0, we always flush as-fast-as-possible + case a.parent.conf.Producer.Flush.Frequency == 0 && a.parent.conf.Producer.Flush.Bytes == 0 && a.parent.conf.Producer.Flush.Messages == 0: + return true + // If the messages is a chaser we must flush to maintain the state-machine + case msg.flags&chaser == chaser: + return true + // If we've passed the message trigger-point + case a.parent.conf.Producer.Flush.Messages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case a.parent.conf.Producer.Flush.Bytes > 0 && a.bufferBytes >= a.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (a *aggregator) reset() { + a.timer = nil + a.buffer = nil + a.bufferBytes = 0 +} + +// takes a batch at a time from the aggregator and sends to the broker +type flusher struct { + parent *asyncProducer + broker *Broker + input <-chan []*ProducerMessage + + currentRetries map[string]map[int32]error +} + +func (f *flusher) run() { + var closing error + + Logger.Printf("producer/flusher/%d starting up\n", f.broker.ID()) + + for batch := range f.input { + if closing != nil { + f.parent.retryMessages(batch, closing) + continue + } + + msgSets := f.groupAndFilter(batch) + request := f.parent.buildRequest(msgSets) + if request == nil { + continue + } + + response, err := f.broker.Produce(request) + + switch err.(type) { + case nil: + break + case PacketEncodingError: + f.parent.returnErrors(batch, err) + continue + default: + Logger.Printf("producer/flusher/%d state change to [closing] because %s\n", f.broker.ID(), err) + f.parent.abandonBrokerConnection(f.broker) + _ = f.broker.Close() + closing = err + 
f.parent.retryMessages(batch, err) + continue + } + + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + f.parent.returnSuccesses(batch) + continue + } + + f.parseResponse(msgSets, response) + } + Logger.Printf("producer/flusher/%d shut down\n", f.broker.ID()) +} + +func (f *flusher) groupAndFilter(batch []*ProducerMessage) map[string]map[int32][]*ProducerMessage { + msgSets := make(map[string]map[int32][]*ProducerMessage) + + for i, msg := range batch { + + if f.currentRetries[msg.Topic] != nil && f.currentRetries[msg.Topic][msg.Partition] != nil { + // we're currently retrying this partition so we need to filter out this message + f.parent.retryMessages([]*ProducerMessage{msg}, f.currentRetries[msg.Topic][msg.Partition]) + batch[i] = nil + + if msg.flags&chaser == chaser { + // ...but now we can start processing future messages again + Logger.Printf("producer/flusher/%d state change to [normal] on %s/%d\n", + f.broker.ID(), msg.Topic, msg.Partition) + delete(f.currentRetries[msg.Topic], msg.Partition) + } + + continue + } + + partitionSet := msgSets[msg.Topic] + if partitionSet == nil { + partitionSet = make(map[int32][]*ProducerMessage) + msgSets[msg.Topic] = partitionSet + } + + partitionSet[msg.Partition] = append(partitionSet[msg.Partition], msg) + } + + return msgSets +} + +func (f *flusher) parseResponse(msgSets map[string]map[int32][]*ProducerMessage, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + for topic, partitionSet := range msgSets { + for partition, msgs := range partitionSet { + block := response.GetBlock(topic, partition) + if block == nil { + f.parent.returnErrors(msgs, ErrIncompleteResponse) + continue + } + + switch block.Err { + // Success + case ErrNoError: + for i := range msgs { + msgs[i].Offset = block.Offset + int64(i) + } + f.parent.returnSuccesses(msgs) + 
// Retriable errors + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/flusher/%d state change to [retrying] on %s/%d because %v\n", + f.broker.ID(), topic, partition, block.Err) + if f.currentRetries[topic] == nil { + f.currentRetries[topic] = make(map[int32]error) + } + f.currentRetries[topic][partition] = block.Err + f.parent.retryMessages(msgs, block.Err) + // Other non-retriable errors + default: + f.parent.returnErrors(msgs, block.Err) + } + } + } +} + +// singleton +// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock +// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel +func (p *asyncProducer) retryHandler() { + var msg *ProducerMessage + buf := queue.New() + + for { + if buf.Length() == 0 { + msg = <-p.retries + } else { + select { + case msg = <-p.retries: + case p.input <- buf.Peek().(*ProducerMessage): + buf.Remove() + continue + } + } + + if msg == nil { + return + } + + buf.Add(msg) + } +} + +// utility functions + +func (p *asyncProducer) shutdown() { + Logger.Println("Producer shutting down.") + p.inFlight.Add(1) + p.input <- &ProducerMessage{flags: shutdown} + + p.inFlight.Wait() + + if p.ownClient { + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) + } + } + + close(p.input) + close(p.retries) + close(p.errors) + close(p.successes) +} + +func (p *asyncProducer) buildRequest(batch map[string]map[int32][]*ProducerMessage) *ProduceRequest { + + req := &ProduceRequest{RequiredAcks: p.conf.Producer.RequiredAcks, Timeout: int32(p.conf.Producer.Timeout / time.Millisecond)} + empty := true + + for topic, partitionSet := range batch { + for partition, msgSet := range partitionSet { + setToSend := new(MessageSet) + setSize := 0 + for _, msg := range msgSet { + var keyBytes, valBytes []byte 
+ var err error + if msg.Key != nil { + if keyBytes, err = msg.Key.Encode(); err != nil { + p.returnError(msg, err) + continue + } + } + if msg.Value != nil { + if valBytes, err = msg.Value.Encode(); err != nil { + p.returnError(msg, err) + continue + } + } + + if p.conf.Producer.Compression != CompressionNone && setSize+msg.byteSize() > p.conf.Producer.MaxMessageBytes { + // compression causes message-sets to be wrapped as single messages, which have tighter + // size requirements, so we have to respect those limits + valBytes, err := encode(setToSend) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. + panic(err) + } + req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes}) + setToSend = new(MessageSet) + setSize = 0 + } + setSize += msg.byteSize() + + setToSend.addMessage(&Message{Codec: CompressionNone, Key: keyBytes, Value: valBytes}) + empty = false + } + + if p.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, setToSend) + } else { + valBytes, err := encode(setToSend) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
+ panic(err) + } + req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes}) + } + } + } + + if empty { + return nil + } + return req +} + +func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + msg.clear() + pErr := &ProducerError{Msg: msg, Err: err} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + p.inFlight.Done() +} + +func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { + for _, msg := range batch { + if msg != nil { + p.returnError(msg, err) + } + } +} + +func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { + for _, msg := range batch { + if msg == nil { + continue + } + if p.conf.Producer.Return.Successes { + msg.clear() + p.successes <- msg + } + p.inFlight.Done() + } +} + +func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { + for _, msg := range batch { + if msg == nil { + continue + } + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, err) + } else { + msg.retries++ + p.retries <- msg + } + } +} + +func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + bp := p.brokers[broker] + + if bp == nil { + bp = p.newBrokerProducer(broker) + p.brokers[broker] = bp + p.brokerRefs[bp] = 0 + } + + p.brokerRefs[bp]++ + + return bp +} + +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + p.brokerRefs[bp]-- + if p.brokerRefs[bp] == 0 { + close(bp) + delete(p.brokerRefs, bp) + + if p.brokers[broker] == bp { + delete(p.brokers, broker) + } + } +} + +func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + delete(p.brokers, broker) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go 
b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go new file mode 100644 index 000000000..403456839 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go @@ -0,0 +1,743 @@ +package sarama + +import ( + "errors" + "log" + "os" + "os/signal" + "sync" + "testing" + "time" +) + +const TestMessage = "ABC THE MESSAGE" + +func closeProducer(t *testing.T, p AsyncProducer) { + var wg sync.WaitGroup + p.AsyncClose() + + wg.Add(2) + go func() { + for _ = range p.Successes() { + t.Error("Unexpected message on Successes()") + } + wg.Done() + }() + go func() { + for msg := range p.Errors() { + t.Error(msg.Err) + } + wg.Done() + }() + wg.Wait() +} + +func expectResults(t *testing.T, p AsyncProducer, successes, errors int) { + for successes > 0 || errors > 0 { + select { + case msg := <-p.Errors(): + if msg.Msg.flags != 0 { + t.Error("Message had flags set") + } + errors-- + if errors < 0 { + t.Error(msg.Err) + } + case msg := <-p.Successes(): + if msg.flags != 0 { + t.Error("Message had flags set") + } + successes-- + if successes < 0 { + t.Error("Too many successes") + } + } + } +} + +type testPartitioner chan *int32 + +func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { + part := <-p + if part == nil { + return 0, errors.New("BOOM") + } + + return *part, nil +} + +func (p testPartitioner) RequiresConsistency() bool { + return true +} + +func (p testPartitioner) feed(partition int32) { + p <- &partition +} + +func TestAsyncProducer(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + 
+ config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} + } + for i := 0; i < 10; i++ { + select { + case msg := <-producer.Errors(): + t.Error(msg.Err) + if msg.Msg.flags != 0 { + t.Error("Message had flags set") + } + case msg := <-producer.Successes(): + if msg.flags != 0 { + t.Error("Message had flags set") + } + if msg.Metadata.(int) != i { + t.Error("Message metadata did not match") + } + } + } + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerMultipleFlushes(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for flush := 0; flush < 3; flush++ { + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 5, 0) + } + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerMultipleBrokers(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader0 := newMockBroker(t, 2) + leader1 := newMockBroker(t, 3) + + 
metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) + metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodResponse0 := new(ProduceResponse) + prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError) + leader0.Returns(prodResponse0) + + prodResponse1 := new(ProduceResponse) + prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) + leader1.Returns(prodResponse1) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + config.Producer.Partitioner = NewRoundRobinPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 10, 0) + + closeProducer(t, producer) + leader1.Close() + leader0.Close() + seedBroker.Close() +} + +func TestAsyncProducerCustomPartitioner(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodResponse := new(ProduceResponse) + prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 2 + config.Producer.Return.Successes = true + config.Producer.Partitioner = func(topic string) Partitioner { + p := make(testPartitioner) + go func() { + p.feed(0) + p <- nil + p <- nil + p <- nil + p.feed(0) + }() + return p + } + 
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 2, 3) + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerFailureRetry(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader1 := newMockBroker(t, 2) + leader2 := newMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + seedBroker.Close() + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader1.Returns(prodNotLeader) + + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + leader1.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + leader1.Close() + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + leader2.Close() + closeProducer(t, producer) +} + +// If a Kafka 
broker becomes unavailable and then returns back in service, then +// producer reconnects to it and continues sending messages. +func TestAsyncProducerBrokerBounce(t *testing.T) { + // Given + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + + config := NewConfig() + config.Producer.Flush.Messages = 1 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // When: a broker connection gets reset by a broker (network glitch, restart, you name it). + leader.Close() // producer should get EOF + leader = newMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles + seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again + + // Then: a produced message goes through the new broker connection. 
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader1 := newMockBroker(t, 2) + leader2 := newMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Max = 3 + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + leader1.Close() // producer should get EOF + seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down + seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down + + // ok fine, tell it to go to leader2 finally + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + seedBroker.Close() + leader2.Close() + + closeProducer(t, producer) +} + +func TestAsyncProducerMultipleRetries(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader1 := newMockBroker(t, 2) + leader2 := 
newMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Max = 4 + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader1.Returns(prodNotLeader) + + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader2) + leader2.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader1) + leader1.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader1) + leader1.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + seedBroker.Close() + leader1.Close() + leader2.Close() + closeProducer(t, producer) +} + +func TestAsyncProducerOutOfRetries(t *testing.T) { + t.Skip("Enable once bug #294 is fixed.") + + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), 
leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + for i := 0; i < 10; i++ { + select { + case msg := <-producer.Errors(): + if msg.Err != ErrNotLeaderForPartition { + t.Error(msg.Err) + } + case <-producer.Successes(): + t.Error("Unexpected success") + } + } + + seedBroker.Returns(metadataResponse) + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + expectResults(t, producer, 10, 0) + + leader.Close() + seedBroker.Close() + safeClose(t, producer) +} + +func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 1 + 
config.Producer.Partitioner = NewRoundRobinPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // prime partition 0 + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // prime partition 1 + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + prodSuccess = new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // reboot the broker (the producer will get EOF on its existing connection) + leader.Close() + leader = newMockBrokerAddr(t, 2, leaderAddr) + + // send another message on partition 0 to trigger the EOF and retry + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + + // tell partition 0 to go to that broker again + seedBroker.Returns(metadataResponse) + + // succeed this time + prodSuccess = new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // shutdown + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerFlusherRetryCondition(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + 
config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 1 + config.Producer.Partitioner = NewManualPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // prime partitions + for p := int32(0); p < 2; p++ { + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} + } + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 5, 0) + } + + // send more messages on partition 0 + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + // tell partition 0 to go to that broker again + seedBroker.Returns(metadataResponse) + + // succeed this time + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 5, 0) + + // put five more through + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} + } + leader.Returns(prodSuccess) + expectResults(t, producer, 5, 0) + + // shutdown + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerRetryShutdown(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataLeader := new(MetadataResponse) + metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) + metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = 
true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + producer.AsyncClose() + time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in + + producer.Input() <- &ProducerMessage{Topic: "FOO"} + if err := <-producer.Errors(); err.Err != ErrShuttingDown { + t.Error(err) + } + + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + seedBroker.Returns(metadataLeader) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + seedBroker.Close() + leader.Close() + + // wait for the async-closed producer to shut down fully + for err := range producer.Errors() { + t.Error(err) + } +} + +// This example shows how to use the producer while simultaneously +// reading the Errors channel to know about any failures. +func ExampleAsyncProducer_select() { + producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) + if err != nil { + panic(err) + } + + defer func() { + if err := producer.Close(); err != nil { + log.Fatalln(err) + } + }() + + // Trap SIGINT to trigger a shutdown. 
+ signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + var enqueued, errors int +ProducerLoop: + for { + select { + case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: + enqueued++ + case err := <-producer.Errors(): + log.Println("Failed to produce message", err) + errors++ + case <-signals: + break ProducerLoop + } + } + + log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors) +} + +// This example shows how to use the producer with separate goroutines +// reading from the Successes and Errors channels. Note that in order +// for the Successes channel to be populated, you have to set +// config.Producer.Return.Successes to true. +func ExampleAsyncProducer_goroutines() { + config := NewConfig() + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) + if err != nil { + panic(err) + } + + // Trap SIGINT to trigger a graceful shutdown. + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + var ( + wg sync.WaitGroup + enqueued, successes, errors int + ) + + wg.Add(1) + go func() { + defer wg.Done() + for _ = range producer.Successes() { + successes++ + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for err := range producer.Errors() { + log.Println(err) + errors++ + } + }() + +ProducerLoop: + for { + message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} + select { + case producer.Input() <- message: + enqueued++ + + case <-signals: + producer.AsyncClose() // Trigger a shutdown of the producer. 
+ break ProducerLoop + } + } + + wg.Wait() + + log.Printf("Successfully produced: %d; errors: %d\n", successes, errors) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go b/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go new file mode 100644 index 000000000..eb5bc0bf8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go @@ -0,0 +1,385 @@ +package sarama + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. +type Broker struct { + id int32 + addr string + + conf *Config + correlationID int32 + conn net.Conn + connErr error + lock sync.Mutex + opened int32 + + responses chan responsePromise + done chan bool +} + +type responsePromise struct { + correlationID int32 + packets chan []byte + errors chan error +} + +// NewBroker creates and returns a Broker targetting the given host:port address. +// This does not attempt to actually connect, you have to call Open() for that. +func NewBroker(addr string) *Broker { + return &Broker{id: -1, addr: addr} +} + +// Open tries to connect to the Broker if it is not already connected or connecting, but does not block +// waiting for the connection to complete. This means that any subsequent operations on the broker will +// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, +// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or +// AlreadyConnected. If conf is nil, the result of NewConfig() is used. 
+func (b *Broker) Open(conf *Config) error { + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + b.lock.Lock() + + if b.conn != nil { + b.lock.Unlock() + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, ErrAlreadyConnected) + return ErrAlreadyConnected + } + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + } + + if conf.Net.TLS.Enable { + b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) + } else { + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + } + if b.connErr != nil { + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + return + } + + b.conf = conf + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. 
+func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + atomic.StoreInt32(&b.opened, 0) + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. +func (b *Broker) Addr() string { + return b.addr +} + +func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { + response := new(MetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { + response := new(ConsumerMetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { + response := new(OffsetResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { + var response *ProduceResponse + var err error + + if request.RequiredAcks == NoResponse { + err = b.sendAndReceive(request, nil) + } else { + response = new(ProduceResponse) + err = b.sendAndReceive(request, response) + } + + if err != nil { + return 
nil, err + } + + return response, nil +} + +func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + response := new(FetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { + response := new(OffsetCommitResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { + response := new(OffsetFetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + if b.connErr != nil { + return nil, b.connErr + } + return nil, ErrNotConnected + } + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req) + if err != nil { + return nil, err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return nil, err + } + + _, err = b.conn.Write(buf) + if err != nil { + return nil, err + } + b.correlationID++ + + if !promiseResponse { + return nil, nil + } + + promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req requestBody, res decoder) error { + promise, err := b.send(req, res != nil) + + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return decode(buf, res) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + 
host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + b.addr = fmt.Sprint(host, ":", port) + + return nil +} + +func (b *Broker) encode(pe packetEncoder) (err error) { + + host, portstr, err := net.SplitHostPort(b.addr) + if err != nil { + return err + } + port, err := strconv.Atoi(portstr) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + return nil +} + +func (b *Broker) responseReceiver() { + header := make([]byte, 8) + for response := range b.responses { + err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) + if err != nil { + response.errors <- err + continue + } + + _, err = io.ReadFull(b.conn, header) + if err != nil { + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = decode(header, &decodedHeader) + if err != nil { + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + response.errors <- PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + continue + } + + buf := make([]byte, decodedHeader.length-4) + _, err = io.ReadFull(b.conn, buf) + if err != nil { + // XXX: the above ReadFull call inherits the same ReadDeadline set at the top of this loop, so it may + // fail with a timeout error. If this happens, our connection is permanently toast since we will no longer + // be aligned correctly on the stream (we'll be reading garbage Kafka headers from the middle of data). + // Can we/should we fail harder in that case? 
+ response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go new file mode 100644 index 000000000..df3499e49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go @@ -0,0 +1,177 @@ +package sarama + +import ( + "fmt" + "testing" +) + +func ExampleBroker() error { + broker := NewBroker("localhost:9092") + err := broker.Open(nil) + if err != nil { + return err + } + + request := MetadataRequest{Topics: []string{"myTopic"}} + response, err := broker.GetMetadata(&request) + if err != nil { + _ = broker.Close() + return err + } + + fmt.Println("There are", len(response.Topics), "topics active in the cluster.") + + return broker.Close() +} + +type mockEncoder struct { + bytes []byte +} + +func (m mockEncoder) encode(pe packetEncoder) error { + return pe.putRawBytes(m.bytes) +} + +func TestBrokerAccessors(t *testing.T) { + broker := NewBroker("abc:123") + + if broker.ID() != -1 { + t.Error("New broker didn't have an ID of -1.") + } + + if broker.Addr() != "abc:123" { + t.Error("New broker didn't have the correct address") + } + + broker.id = 34 + if broker.ID() != 34 { + t.Error("Manually setting broker ID did not take effect.") + } +} + +func TestSimpleBrokerCommunication(t *testing.T) { + mb := newMockBroker(t, 0) + defer mb.Close() + + broker := NewBroker(mb.Addr()) + err := broker.Open(nil) + if err != nil { + t.Fatal(err) + } + + for _, tt := range brokerTestTable { + mb.Returns(&mockEncoder{tt.response}) + } + for _, tt := range brokerTestTable { + tt.runner(t, broker) + } + + err = broker.Close() + if err != nil { + t.Error(err) + } +} + +// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake +var brokerTestTable = []struct { + response []byte + runner func(*testing.T, *Broker) +}{ + {[]byte{0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := MetadataRequest{} + response, err := broker.GetMetadata(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Metadata request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := ConsumerMetadataRequest{} + response, err := broker.GetConsumerMetadata(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Consumer Metadata request got no response!") + } + }}, + + {[]byte{}, + func(t *testing.T, broker *Broker) { + request := ProduceRequest{} + request.RequiredAcks = NoResponse + response, err := broker.Produce(&request) + if err != nil { + t.Error(err) + } + if response != nil { + t.Error("Produce request with NoResponse got a response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := ProduceRequest{} + request.RequiredAcks = WaitForLocal + response, err := broker.Produce(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Produce request without NoResponse got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := FetchRequest{} + response, err := broker.Fetch(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Fetch request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetFetchRequest{} + response, err := broker.FetchOffset(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("OffsetFetch request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetCommitRequest{} + response, err := broker.CommitOffset(&request) + if err != nil { + t.Error(err) + } + if response == nil { + 
t.Error("OffsetCommit request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetRequest{} + response, err := broker.GetAvailableOffsets(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Offset request got no response!") + } + }}, +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/client.go b/Godeps/_workspace/src/github.com/Shopify/sarama/client.go new file mode 100644 index 000000000..974d223e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/client.go @@ -0,0 +1,727 @@ +package sarama + +import ( + "math/rand" + "sort" + "sync" + "time" +) + +// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. +// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected +// automatically when it passes out of scope. A single client can be safely shared by +// multiple concurrent Producers and Consumers. +type Client interface { + // Config returns the Config struct of the client. This struct should not be altered after it + // has been created. + Config() *Config + + // Topics returns the set of available topics as retrieved from the cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for the given topic, + // where "writable" means "having a valid leader accepting writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current topic/partition, as + // determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. 
+ Replicas(topic string, partitionID int32) ([]int32, error) + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh metadata + // for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the given + // time on the topic/partition combination. Time should be OffsetOldest for the earliest available + // offset, OffsetNewest for the offset of the message that will be produced next, or a time. + GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will return a locally cached + // value if it's available. You can call RefreshCoordinator to update the cached value. + // This function only works on Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it in local cache. + // This function only works on Kafka 0.8.2 and higher. + RefreshCoordinator(consumerGroup string) error + + // Close shuts down all broker connections managed by this client. It is required to call this function before + // a client object passes out of scope, as it will otherwise leak memory. You must close any Producers or Consumers + // using a client before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be assigned to the next message + // that will be produced to the partition. You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming new messages. + OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a partition. 
You can send this + // to a client's GetOffset method to get this offset, or when calling ConsumePartition to start consuming + // from the oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) + // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. 
+func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } + + // do an initial fetch of all cluster metadata by specifing an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Close() error { + if client.Closed() { + // Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. 
+ Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + + return nil +} + +func (client *client) Closed() bool { + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. 
Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, writablePartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return nil, metadata.Err + } + return dupeAndSort(metadata.Replicas), nil +} + +func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + leader, err := client.cachedLeader(topic, partitionID) + + if leader == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + leader, err = client.cachedLeader(topic, partitionID) + } + + return leader, err +} + +func (client *client) RefreshMetadata(topics ...string) error { + if client.Closed() { + return ErrClosedClient + } + + // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper + // error. This handles the case by returning an error instead of sending it + // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + for _, topic := range topics { + if len(topic) == 0 { + return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return + } + } + + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) +} + +func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { + if client.Closed() { + return -1, ErrClosedClient + } + + offset, err := client.getOffset(topic, partitionID, time) + + if err != nil { + if err := client.RefreshMetadata(topic); err != nil { + return -1, err + } + return client.getOffset(topic, partitionID, time) + } + + return offset, err +} + +func (client *client) Coordinator(consumerGroup string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedCoordinator(consumerGroup) + + if coordinator == nil { + if err := client.RefreshCoordinator(consumerGroup); err != nil { + return nil, err + } + coordinator = client.cachedCoordinator(consumerGroup) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshCoordinator(consumerGroup string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.coordinators[consumerGroup] = response.Coordinator.ID() + return nil +} + +// private broker management helpers + +// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered +// in the brokers map. It returns the broker that is registered, which may be the provided broker, +// or a previously registered Broker instance. You must hold the write lock before calling this function. 
+func (client *client) registerBroker(broker *Broker) { + if client.brokers[broker.ID()] == nil { + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } +} + +// deregisterBroker removes a broker from the seedsBroker list, and if it's +// not the seedbroker, removes it from brokers map completely. +func (client *client) deregisterBroker(broker *Broker) { + client.lock.Lock() + defer client.lock.Unlock() + + if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { + client.deadSeeds = append(client.deadSeeds, broker) + client.seedBrokers = client.seedBrokers[1:] + } else { + // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, + // but we really shouldn't have to; once that loop is made better this case can be + // removed, and the function generally can be renamed from `deregisterBroker` to + // `nextSeedBroker` or something + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + } +} + +func (client *client) resurrectDeadBrokers() { + client.lock.Lock() + defer client.lock.Unlock() + + Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) + client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) 
+ client.deadSeeds = nil +} + +func (client *client) any() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + // not guaranteed to be random *or* deterministic + for _, broker := range client.brokers { + _ = broker.Open(client.conf) + return broker + } + + return nil +} + +// private caching/lazy metadata helpers + +type partitionType int + +const ( + allPartitions partitionType = iota + writablePartitions + // If you add any more types, update the partition cache in update() + + // Ensure this is the last partition type value + maxPartitionIndex +) + +func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + return partitions[partitionID] + } + + return nil +} + +func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions, exists := client.cachedPartitionsResults[topic] + + if !exists { + return nil + } + return partitions[partitionSet] +} + +func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { + partitions := client.metadata[topic] + + if partitions == nil { + return nil + } + + ret := make([]int32, 0, len(partitions)) + for _, partition := range partitions { + if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + continue + } + ret = append(ret, partition.ID) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + metadata, ok := partitions[partitionID] + if ok { + if metadata.Err == ErrLeaderNotAvailable { + return 
nil, ErrLeaderNotAvailable + } + b := client.brokers[metadata.Leader] + if b == nil { + return nil, ErrLeaderNotAvailable + } + _ = b.Open(client.conf) + return b, nil + } + } + + return nil, ErrUnknownTopicOrPartition +} + +func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { + broker, err := client.Leader(topic, partitionID) + if err != nil { + return -1, err + } + + request := &OffsetRequest{} + request.AddBlock(topic, partitionID, time, 1) + + response, err := broker.GetAvailableOffsets(request) + if err != nil { + _ = broker.Close() + return -1, err + } + + block := response.GetBlock(topic, partitionID) + if block == nil { + _ = broker.Close() + return -1, ErrIncompleteResponse + } + if block.Err != ErrNoError { + return -1, block.Err + } + if len(block.Offsets) != 1 { + return -1, ErrOffsetOutOfRange + } + + return block.Offsets[0], nil +} + +// core metadata update logic + +func (client *client) backgroundMetadataUpdater() { + defer close(client.closed) + + if client.conf.Metadata.RefreshFrequency == time.Duration(0) { + return + } + + ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := client.RefreshMetadata(); err != nil { + Logger.Println("Client background metadata update:", err) + } + case <-client.closer: + return + } + } +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { + retry := func(err error) error { + if attemptsRemaining > 0 { + Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.tryRefreshMetadata(topics, attemptsRemaining-1) + } + return err + } + + for broker := client.any(); broker != nil; broker = client.any() { + if len(topics) > 0 { + Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + } else { + Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + } + response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) + + switch err.(type) { + case nil: + // valid response, use it + if shouldRetry, err := client.updateMetadata(response); shouldRetry { + Logger.Println("client/metadata found some partitions to be leaderless") + return retry(err) // note: err can be nil + } else { + return err + } + + case PacketEncodingError: + // didn't even send, return the error + return err + default: + // some other error, remove that broker and try again + Logger.Println("client/metadata got error from broker while fetching metadata:", err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + + Logger.Println("client/metadata no available broker to send metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable +func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { + client.lock.Lock() + defer client.lock.Unlock() + + // For all the brokers we received: + // - if it is a new ID, save it + // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - otherwise ignore it, replacing our existing one would just bounce the connection + for _, broker := range data.Brokers { + client.registerBroker(broker) + } + + for _, topic := range data.Topics { + delete(client.metadata, topic.Name) + 
delete(client.cachedPartitionsResults, topic.Name) + + switch topic.Err { + case ErrNoError: + break + case ErrInvalidTopic: // don't retry, don't store partial results + err = topic.Err + continue + case ErrUnknownTopicOrPartition: // retry, do not store partial partition results + err = topic.Err + retry = true + continue + case ErrLeaderNotAvailable: // retry, but store partial partition results + retry = true + break + default: // don't retry, don't store partial results + Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) + err = topic.Err + continue + } + + client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) + for _, partition := range topic.Partitions { + client.metadata[topic.Name][partition.ID] = partition + if partition.Err == ErrLeaderNotAvailable { + retry = true + } + } + + var partitionCache [maxPartitionIndex][]int32 + partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) + partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) + client.cachedPartitionsResults[topic.Name] = partitionCache + } + + return +} + +func (client *client) cachedCoordinator(consumerGroup string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.coordinators[consumerGroup]; !ok { + return nil + } else { + return client.brokers[coordinatorID] + } +} + +func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { + retry := func(err error) (*ConsumerMetadataResponse, error) { + if attemptsRemaining > 0 { + Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + } + return nil, err + } + + for broker := client.any(); broker != nil; broker = client.any() { + Logger.Printf("client/coordinator requesting coordinator for consumergoup %s from %s\n", consumerGroup, broker.Addr()) + + request := new(ConsumerMetadataRequest) + request.ConsumerGroup = consumerGroup + + response, err := broker.GetConsumerMetadata(request) + + if err != nil { + Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) + + switch err.(type) { + case PacketEncodingError: + return nil, err + default: + _ = broker.Close() + client.deregisterBroker(broker) + continue + } + } + + switch response.Err { + case ErrNoError: + Logger.Printf("client/coordinator coordinator for consumergoup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + return response, nil + + case ErrConsumerCoordinatorNotAvailable: + Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + + // This is very ugly, but this scenario will only happen once per cluster. + // The __consumer_offsets topic only has to be created one time. + // The number of partitions not configurable, but partition 0 should always exist. + if _, err := client.Leader("__consumer_offsets", 0); err != nil { + Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. 
Waiting 2 seconds...\n") + time.Sleep(2 * time.Second) + } + + return retry(ErrConsumerCoordinatorNotAvailable) + default: + return nil, response.Err + } + } + + Logger.Println("client/coordinator no available broker to send consumer metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go new file mode 100644 index 000000000..f84b9af31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go @@ -0,0 +1,608 @@ +package sarama + +import ( + "io" + "sync" + "testing" + "time" +) + +func safeClose(t testing.TB, c io.Closer) { + err := c.Close() + if err != nil { + t.Error(err) + } +} + +func TestSimpleClient(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + seedBroker.Returns(new(MetadataResponse)) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestCachedPartitions(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + replicas := []int32{3, 1, 5} + isr := []int32{5, 1} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker("localhost:12345", 2) + metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + c, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + client := c.(*client) + + // Verify they aren't cached the same + allP := client.cachedPartitionsResults["my_topic"][allPartitions] + writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] + if len(allP) == len(writeP) { + t.Fatal("Invalid lengths!") + } + + tmp := client.cachedPartitionsResults["my_topic"] + // 
Verify we actually use the cache at all! + tmp[allPartitions] = []int32{1, 2, 3, 4} + client.cachedPartitionsResults["my_topic"] = tmp + if 4 != len(client.cachedPartitions("my_topic", allPartitions)) { + t.Fatal("Not using the cache!") + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + replicas := []int32{seedBroker.BrokerID()} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse) + + partitions, err := client.Partitions("unknown") + + if err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + if partitions != nil { + t.Errorf("Should return nil as partition list, found %v", partitions) + } + + // Should still use the cache of a known topic + partitions, err = client.Partitions("my_topic") + if err != nil { + t.Errorf("Expected no error, found %v", err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse) + + // Should not use cache for unknown topic + partitions, err = client.Partitions("unknown") + if err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + if partitions != nil { + t.Errorf("Should return nil as partition list, found %v", partitions) + } + + 
seedBroker.Close() + safeClose(t, client) +} + +func TestClientSeedBrokers(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker("localhost:12345", 2) + seedBroker.Returns(metadataResponse) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestClientMetadata(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 5) + + replicas := []int32{3, 1, 5} + isr := []int32{5, 1} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + topics, err := client.Topics() + if err != nil { + t.Error(err) + } else if len(topics) != 1 || topics[0] != "my_topic" { + t.Error("Client returned incorrect topics:", topics) + } + + parts, err := client.Partitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { + t.Error("Client returned incorrect partitions for my_topic:", parts) + } + + parts, err = client.WritablePartitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 1 || parts[0] != 0 { + t.Error("Client returned incorrect writable partitions for my_topic:", parts) + } + + tst, err := client.Leader("my_topic", 0) + if err != nil { + t.Error(err) + } else if tst.ID() != 5 { + t.Error("Leader for my_topic had incorrect ID.") + } + + replicas, err = client.Replicas("my_topic", 0) + if err != nil { + t.Error(err) + } else if replicas[0] != 1 { + 
t.Error("Incorrect (or unsorted) replica") + } else if replicas[1] != 3 { + t.Error("Incorrect (or unsorted) replica") + } else if replicas[2] != 5 { + t.Error("Incorrect (or unsorted) replica") + } + + leader.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientGetOffset(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadata := new(MetadataResponse) + metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadata.AddBroker(leaderAddr, leader.BrokerID()) + seedBroker.Returns(metadata) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + offsetResponse := new(OffsetResponse) + offsetResponse.AddTopicPartition("foo", 0, 123) + leader.Returns(offsetResponse) + + offset, err := client.GetOffset("foo", 0, OffsetNewest) + if err != nil { + t.Error(err) + } + if offset != 123 { + t.Error("Unexpected offset, got ", offset) + } + + leader.Close() + seedBroker.Returns(metadata) + + leader = newMockBrokerAddr(t, 2, leaderAddr) + offsetResponse = new(OffsetResponse) + offsetResponse.AddTopicPartition("foo", 0, 456) + leader.Returns(offsetResponse) + + offset, err = client.GetOffset("foo", 0, OffsetNewest) + if err != nil { + t.Error(err) + } + if offset != 456 { + t.Error("Unexpected offset, got ", offset) + } + + seedBroker.Close() + leader.Close() + safeClose(t, client) +} + +func TestClientReceivingUnknownTopic(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + metadataResponse1 := new(MetadataResponse) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Metadata.Retry.Backoff = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + metadataUnknownTopic := new(MetadataResponse) + metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataUnknownTopic) + 
seedBroker.Returns(metadataUnknownTopic) + + if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition { + t.Error("ErrUnknownTopicOrPartition expected, got", err) + } + + // If we are asking for the leader of a partition of the non-existing topic. + // we will request metadata again. + seedBroker.Returns(metadataUnknownTopic) + seedBroker.Returns(metadataUnknownTopic) + + if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + safeClose(t, client) + seedBroker.Close() +} + +func TestClientReceivingPartialMetadata(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 5) + + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} + + metadataPartial := new(MetadataResponse) + metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) + metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError) + metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable) + seedBroker.Returns(metadataPartial) + + if err := client.RefreshMetadata("new_topic"); err != nil { + t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") + } + + // Even though the metadata was incomplete, we should be able to get the leader of a partition + // for which we did get a useful response, without doing additional requests. 
+ + partition0Leader, err := client.Leader("new_topic", 0) + if err != nil { + t.Error(err) + } else if partition0Leader.Addr() != leader.Addr() { + t.Error("Unexpected leader returned", partition0Leader.Addr()) + } + + // If we are asking for the leader of a partition that didn't have a leader before, + // we will do another metadata request. + + seedBroker.Returns(metadataPartial) + + // Still no leader for the partition, so asking for it should return an error. + _, err = client.Leader("new_topic", 1) + if err != ErrLeaderNotAvailable { + t.Error("Expected ErrLeaderNotAvailable, got", err) + } + + safeClose(t, client) + seedBroker.Close() + leader.Close() +} + +func TestClientRefreshBehaviour(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 5) + + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) + seedBroker.Returns(metadataResponse1) + + metadataResponse2 := new(MetadataResponse) + metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse2) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + parts, err := client.Partitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 1 || parts[0] != 0xb { + t.Error("Client returned incorrect partitions for my_topic:", parts) + } + + tst, err := client.Leader("my_topic", 0xb) + if err != nil { + t.Error(err) + } else if tst.ID() != 5 { + t.Error("Leader for my_topic had incorrect ID.") + } + + leader.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientResurrectDeadSeeds(t *testing.T) { + initialSeed := newMockBroker(t, 0) + emptyMetadata := new(MetadataResponse) + initialSeed.Returns(emptyMetadata) + + conf := NewConfig() + conf.Metadata.Retry.Backoff = 0 + conf.Metadata.RefreshFrequency = 0 + c, err := NewClient([]string{initialSeed.Addr()}, conf) + if err != nil { + 
t.Fatal(err) + } + initialSeed.Close() + + client := c.(*client) + + seed1 := newMockBroker(t, 1) + seed2 := newMockBroker(t, 2) + seed3 := newMockBroker(t, 3) + addr1 := seed1.Addr() + addr2 := seed2.Addr() + addr3 := seed3.Addr() + + // Overwrite the seed brokers with a fixed ordering to make this test deterministic. + safeClose(t, client.seedBrokers[0]) + client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} + client.deadSeeds = []*Broker{} + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + if err := client.RefreshMetadata(); err != nil { + t.Error(err) + } + wg.Done() + }() + seed1.Close() + seed2.Close() + + seed1 = newMockBrokerAddr(t, 1, addr1) + seed2 = newMockBrokerAddr(t, 2, addr2) + + seed3.Close() + + seed1.Close() + seed2.Returns(emptyMetadata) + + wg.Wait() + + if len(client.seedBrokers) != 2 { + t.Error("incorrect number of live seeds") + } + if len(client.deadSeeds) != 1 { + t.Error("incorrect number of dead seeds") + } + + safeClose(t, c) +} + +func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { + seedBroker := newMockBroker(t, 1) + staleCoordinator := newMockBroker(t, 2) + freshCoordinator := newMockBroker(t, 3) + + replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) + metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) + metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse1) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + coordinatorResponse1 := new(ConsumerMetadataResponse) + coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable + seedBroker.Returns(coordinatorResponse1) + + coordinatorResponse2 := new(ConsumerMetadataResponse) + 
coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID() + coordinatorResponse2.CoordinatorHost = "127.0.0.1" + coordinatorResponse2.CoordinatorPort = staleCoordinator.Port() + + seedBroker.Returns(coordinatorResponse2) + + broker, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if staleCoordinator.Addr() != broker.Addr() { + t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) + } + + if staleCoordinator.BrokerID() != broker.ID() { + t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) + } + + // Grab the cached value + broker2, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if broker2.Addr() != broker.Addr() { + t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr()) + } + + coordinatorResponse3 := new(ConsumerMetadataResponse) + coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID() + coordinatorResponse3.CoordinatorHost = "127.0.0.1" + coordinatorResponse3.CoordinatorPort = freshCoordinator.Port() + + seedBroker.Returns(coordinatorResponse3) + + // Refresh the locally cahced value because it's stale + if err := client.RefreshCoordinator("my_group"); err != nil { + t.Error(err) + } + + // Grab the fresh value + broker3, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if broker3.Addr() != freshCoordinator.Addr() { + t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr()) + } + + freshCoordinator.Close() + staleCoordinator.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) { + seedBroker := newMockBroker(t, 1) + coordinator := newMockBroker(t, 2) + + metadataResponse1 := new(MetadataResponse) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + 
config.Metadata.Retry.Backoff = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + coordinatorResponse1 := new(ConsumerMetadataResponse) + coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable + seedBroker.Returns(coordinatorResponse1) + + metadataResponse2 := new(MetadataResponse) + metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse2) + + replicas := []int32{coordinator.BrokerID()} + metadataResponse3 := new(MetadataResponse) + metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse3) + + coordinatorResponse2 := new(ConsumerMetadataResponse) + coordinatorResponse2.CoordinatorID = coordinator.BrokerID() + coordinatorResponse2.CoordinatorHost = "127.0.0.1" + coordinatorResponse2.CoordinatorPort = coordinator.Port() + + seedBroker.Returns(coordinatorResponse2) + + broker, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if coordinator.Addr() != broker.Addr() { + t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) + } + + if coordinator.BrokerID() != broker.ID() { + t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) + } + + coordinator.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientAutorefreshShutdownRace(t *testing.T) { + seedBroker := newMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + seedBroker.Returns(metadataResponse) + + conf := NewConfig() + conf.Metadata.RefreshFrequency = 100 * time.Millisecond + client, err := NewClient([]string{seedBroker.Addr()}, conf) + if err != nil { + t.Fatal(err) + } + + // Wait for the background refresh to kick in + time.Sleep(110 * time.Millisecond) + + done := make(chan none) + go func() { + // Close the client + if err := client.Close(); err != nil { + 
t.Fatal(err) + } + close(done) + }() + + // Wait for the Close to kick in + time.Sleep(10 * time.Millisecond) + + // Then return some metadata to the still-running background thread + leader := newMockBroker(t, 2) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError) + seedBroker.Returns(metadataResponse) + + <-done + + seedBroker.Close() + + // give the update time to happen so we get a panic if it's still running (which it shouldn't) + time.Sleep(10 * time.Millisecond) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/config.go b/Godeps/_workspace/src/github.com/Shopify/sarama/config.go new file mode 100644 index 000000000..0fae111e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/config.go @@ -0,0 +1,275 @@ +package sarama + +import ( + "crypto/tls" + "time" +) + +// Config is used to pass multiple configuration options to Sarama's constructors. +type Config struct { + // Net is the namespace for network-level properties used by the Broker, and shared by the Client/Producer/Consumer. + Net struct { + MaxOpenRequests int // How many outstanding requests a connection is allowed to have before sending on it blocks (default 5). + + // All three of the below configurations are similar to the `socket.timeout.ms` setting in JVM kafka. + DialTimeout time.Duration // How long to wait for the initial connection to succeed before timing out and returning an error (default 30s). + ReadTimeout time.Duration // How long to wait for a response before timing out and returning an error (default 30s). + WriteTimeout time.Duration // How long to wait for a transmit to succeed before timing out and returning an error (default 30s). + + // NOTE: these config values have no compatibility guarantees; they may change when Kafka releases its + // official TLS support in version 0.9. 
+ TLS struct { + Enable bool // Whether or not to use TLS when connecting to the broker (defaults to false). + Config *tls.Config // The TLS configuration to use for secure connections if enabled (defaults to nil). + } + + // KeepAlive specifies the keep-alive period for an active network connection. + // If zero, keep-alives are disabled. (default is 0: disabled). + KeepAlive time.Duration + } + + // Metadata is the namespace for metadata management properties used by the Client, and shared by the Producer/Consumer. + Metadata struct { + Retry struct { + Max int // The total number of times to retry a metadata request when the cluster is in the middle of a leader election (default 3). + Backoff time.Duration // How long to wait for leader election to occur before retrying (default 250ms). Similar to the JVM's `retry.backoff.ms`. + } + // How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. + // Set to 0 to disable. Similar to `topic.metadata.refresh.interval.ms` in the JVM version. + RefreshFrequency time.Duration + } + + // Producer is the namespace for configuration related to producing messages, used by the Producer. + Producer struct { + // The maximum permitted size of a message (defaults to 1000000). Should be set equal to or smaller than the broker's `message.max.bytes`. + MaxMessageBytes int + // The level of acknowledgement reliability needed from the broker (defaults to WaitForLocal). + // Equivalent to the `request.required.acks` setting of the JVM producer. + RequiredAcks RequiredAcks + // The maximum duration the broker will wait the receipt of the number of RequiredAcks (defaults to 10 seconds). + // This is only relevant when RequiredAcks is set to WaitForAll or a number > 1. Only supports millisecond resolution, + // nanoseconds will be truncated. Equivalent to the JVM producer's `request.timeout.ms` setting. + Timeout time.Duration + // The type of compression to use on messages (defaults to no compression). 
Similar to `compression.codec` setting of the JVM producer. + Compression CompressionCodec + // Generates partitioners for choosing the partition to send messages to (defaults to hashing the message key). + // Similar to the `partitioner.class` setting for the JVM producer. + Partitioner PartitionerConstructor + + // Return specifies what channels will be populated. If they are set to true, you must read from + // the respective channels to prevent deadlock. + Return struct { + // If enabled, successfully delivered messages will be returned on the Successes channel (default disabled). + Successes bool + + // If enabled, messages that failed to deliver will be returned on the Errors channel, including error (default enabled). + Errors bool + } + + // The following config options control how often messages are batched up and sent to the broker. By default, + // messages are sent as fast as possible, and all messages received while the current batch is in-flight are placed + // into the subsequent batch. + Flush struct { + Bytes int // The best-effort number of bytes needed to trigger a flush. Use the global sarama.MaxRequestSize to set a hard upper limit. + Messages int // The best-effort number of messages needed to trigger a flush. Use `MaxMessages` to set a hard upper limit. + Frequency time.Duration // The best-effort frequency of flushes. Equivalent to `queue.buffering.max.ms` setting of JVM producer. + // The maximum number of messages the producer will send in a single broker request. + // Defaults to 0 for unlimited. Similar to `queue.buffering.max.messages` in the JVM producer. + MaxMessages int + } + + Retry struct { + // The total number of times to retry sending a message (default 3). + // Similar to the `message.send.max.retries` setting of the JVM producer. + Max int + // How long to wait for the cluster to settle between retries (default 100ms). + // Similar to the `retry.backoff.ms` setting of the JVM producer. 
+ Backoff time.Duration + } + } + + // Consumer is the namespace for configuration related to consuming messages, used by the Consumer. + Consumer struct { + Retry struct { + // How long to wait after a failing to read from a partition before trying again (default 2s). + Backoff time.Duration + } + + // Fetch is the namespace for controlling how many bytes are retrieved by any given request. + Fetch struct { + // The minimum number of message bytes to fetch in a request - the broker will wait until at least this many are available. + // The default is 1, as 0 causes the consumer to spin when no messages are available. Equivalent to the JVM's `fetch.min.bytes`. + Min int32 + // The default number of message bytes to fetch from the broker in each request (default 32768). This should be larger than the + // majority of your messages, or else the consumer will spend a lot of time negotiating sizes and not actually consuming. Similar + // to the JVM's `fetch.message.max.bytes`. + Default int32 + // The maximum number of message bytes to fetch from the broker in a single request. Messages larger than this will return + // ErrMessageTooLarge and will not be consumable, so you must be sure this is at least as large as your largest message. + // Defaults to 0 (no limit). Similar to the JVM's `fetch.message.max.bytes`. The global `sarama.MaxResponseSize` still applies. + Max int32 + } + // The maximum amount of time the broker will wait for Consumer.Fetch.Min bytes to become available before it + // returns fewer than that anyways. The default is 250ms, since 0 causes the consumer to spin when no events are available. + // 100-500ms is a reasonable range for most cases. Kafka only supports precision up to milliseconds; nanoseconds will be truncated. + // Equivalent to the JVM's `fetch.wait.max.ms`. + MaxWaitTime time.Duration + + // The maximum amount of time the consumer expects a message takes to process for the user. 
If writing to the Messages channel + // takes longer than this, that partition will stop fetching more messages until it can proceed again. Note that, since the + // Messages channel is buffered, the actual grace time is (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. + MaxProcessingTime time.Duration + + // Return specifies what channels will be populated. If they are set to true, you must read from + // them to prevent deadlock. + Return struct { + // If enabled, any errors that occured while consuming are returned on the Errors channel (default disabled). + Errors bool + } + } + + // A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes. + // Defaults to "sarama", but you should probably set it to something specific to your application. + ClientID string + // The number of events to buffer in internal and external channels. This permits the producer and consumer to + // continue processing some messages in the background while user code is working, greatly improving throughput. + // Defaults to 256. + ChannelBufferSize int +} + +// NewConfig returns a new configuration instance with sane defaults. 
+func NewConfig() *Config { + c := &Config{} + + c.Net.MaxOpenRequests = 5 + c.Net.DialTimeout = 30 * time.Second + c.Net.ReadTimeout = 30 * time.Second + c.Net.WriteTimeout = 30 * time.Second + + c.Metadata.Retry.Max = 3 + c.Metadata.Retry.Backoff = 250 * time.Millisecond + c.Metadata.RefreshFrequency = 10 * time.Minute + + c.Producer.MaxMessageBytes = 1000000 + c.Producer.RequiredAcks = WaitForLocal + c.Producer.Timeout = 10 * time.Second + c.Producer.Partitioner = NewHashPartitioner + c.Producer.Retry.Max = 3 + c.Producer.Retry.Backoff = 100 * time.Millisecond + c.Producer.Return.Errors = true + + c.Consumer.Fetch.Min = 1 + c.Consumer.Fetch.Default = 32768 + c.Consumer.Retry.Backoff = 2 * time.Second + c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxProcessingTime = 100 * time.Millisecond + c.Consumer.Return.Errors = false + + c.ChannelBufferSize = 256 + + return c +} + +// Validate checks a Config instance. It will return a +// ConfigurationError if the specified values don't make sense. 
+func (c *Config) Validate() error { + // some configuration values should be warned on but not fail completely, do those first + if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") + } + if c.Producer.RequiredAcks > 1 { + Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") + } + if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { + Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") + } + if c.Producer.Flush.Bytes >= int(MaxRequestSize) { + Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") + } + if c.Producer.Timeout%time.Millisecond != 0 { + Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") + } + if c.Consumer.MaxWaitTime < 100*time.Millisecond { + Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. 
See documentation for details.") + } + if c.Consumer.MaxWaitTime%time.Millisecond != 0 { + Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") + } + if c.ClientID == "sarama" { + Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") + } + + // validate Net values + switch { + case c.Net.MaxOpenRequests <= 0: + return ConfigurationError("Net.MaxOpenRequests must be > 0") + case c.Net.DialTimeout <= 0: + return ConfigurationError("Net.DialTimeout must be > 0") + case c.Net.ReadTimeout <= 0: + return ConfigurationError("Net.ReadTimeout must be > 0") + case c.Net.WriteTimeout <= 0: + return ConfigurationError("Net.WriteTimeout must be > 0") + case c.Net.KeepAlive < 0: + return ConfigurationError("Net.KeepAlive must be >= 0") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case 
c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + // validate the Consumer values + switch { + case c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + } + + // validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go new file mode 100644 index 000000000..255281a65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go @@ -0,0 +1,10 @@ +package sarama + +import "testing" + +func TestDefaultConfigValidates(t *testing.T) { + config := NewConfig() + if err := config.Validate(); err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go new file mode 100644 index 
000000000..43ce3b21b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go @@ -0,0 +1,676 @@ +package sarama + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Key, Value []byte + Topic string + Partition int32 + Offset int64 +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. +type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. +// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors +// when stopping. +type ConsumerErrors []*ConsumerError + +func (ce ConsumerErrors) Error() string { + return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) +} + +// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() +// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of +// scope. +// +// Sarama's Consumer type does not currently support automatic consumer group rebalancing and offset tracking, +// however the https://github.com/wvanbergen/kafka library builds on Sarama to add this support. We plan +// to properly integrate this functionality at a later date. +type Consumer interface { + + // Topics returns the set of available topics as retrieved from the cluster metadata. + // This method is the same as Client.Topics(), and is provided for convenience. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. 
+ // This method is the same as Client.Pertitions(), and is provided for convenience. + Partitions(topic string) ([]int32, error) + + // ConsumePartition creates a PartitionConsumer on the given topic/partition with the given offset. It will + // return an error if this Consumer is already consuming on the given topic/partition. Offset can be a + // literal offset, or OffsetNewest or OffsetOldest + ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + + // Close shuts down the consumer. It must be called after all child PartitionConsumers have already been closed. + Close() error +} + +type consumer struct { + client Client + conf *Config + ownClient bool + + lock sync.Mutex + children map[string]map[int32]*partitionConsumer + brokerConsumers map[*Broker]*brokerConsumer +} + +// NewConsumer creates a new consumer using the given broker addresses and configuration. +func NewConsumer(addrs []string, config *Config) (Consumer, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + + c, err := NewConsumerFromClient(client) + if err != nil { + return nil, err + } + c.(*consumer).ownClient = true + return c, nil +} + +// NewConsumerFromClient creates a new consumer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. 
+func NewConsumerFromClient(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + if c.ownClient { + return c.client.Close() + } + return nil +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return 
ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() +// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically +// when it passes out of scope. +// +// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range +// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported +// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, +// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. 
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set +// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement +// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +type PartitionConsumer interface { + + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, + // after which you should wait until the 'messages' and 'errors' channel are drained. + // It is required to call this function, or Close before a consumer object passes out of scope, + // as it will otherwise leak memory. You must call this before calling Close on the underlying + // client. + AsyncClose() + + // Close stops the PartitionConsumer from fetching messages. It is required to call this function + // (or AsyncClose) before a consumer object passes out of scope, as it will otherwise leak memory. You must + // call this before calling Close on the underlying client. + Close() error + + // Messages returns the read channel for the messages that are returned by the broker. + Messages() <-chan *ConsumerMessage + + // Errors returns a read channel of errors that occured during consuming, if enabled. By default, + // errors are logged and not returned over this channel. If you want to implement any custom errpr + // handling, set your config's Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // HighWaterMarkOffset returns the high water mark offset of the partition, i.e. the offset that will + // be used for the next message that will be produced. You can use this to determine how far behind + // the processing is. 
+ HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + consumer *consumer + conf *Config + topic string + partition int32 + + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + responseResult error + + fetchSize int32 + offset int64 + highWaterMarkOffset int64 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) dispatcher() { + for _ = range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.conf.Consumer.Retry.Backoff): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := 
child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + close(child.dying) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + go withRecover(func() { + for _ = range child.messages { + // drain + } + }) + + var errors ConsumerErrors + for err := range child.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + for i, msg := range msgs { + select { + case child.messages <- msg: + case <-time.After(child.conf.Consumer.MaxProcessingTime): + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } + } + + child.broker.acks.Done() + } + + 
close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + if len(block.MsgSet.Messages) == 0 { + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... + if block.MsgSet.PartialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + incomplete := false + prelude := true + var messages []*ConsumerMessage + for _, msgBlock := range block.MsgSet.Messages { + + for _, msg := range msgBlock.Messages() { + if prelude && msg.Offset < child.offset { + continue + } + prelude = false + + if msg.Offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: msg.Offset, + }) + child.offset = msg.Offset + 1 + } else { + incomplete = true + } + } + + } + + if incomplete || len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +// brokerConsumer + +type brokerConsumer struct { + consumer *consumer + broker 
*Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + wait chan none + subscriptions map[*partitionConsumer]none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } + + go withRecover(bc.subscriptionManager) + go withRecover(bc.subscriptionConsumer) + + return bc +} + +func (bc *brokerConsumer) subscriptionManager() { + var buffer []*partitionConsumer + + // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer + // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks + // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give + // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, + // so the main goroutine can block waiting for work if it has none. 
+ for { + if len(buffer) > 0 { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- buffer: + buffer = nil + case bc.wait <- none{}: + } + } else { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- nil: + } + } + } + +done: + close(bc.wait) + if len(buffer) > 0 { + bc.newSubscriptions <- buffer + } + close(bc.newSubscriptions) +} + +func (bc *brokerConsumer) subscriptionConsumer() { + <-bc.wait // wait for our first piece of work + + // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available + for newSubscriptions := range bc.newSubscriptions { + for _, child := range newSubscriptions { + bc.subscriptions[child] = none{} + Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + } + + if len(bc.subscriptions) == 0 { + // We're about to be shut down or we're about to receive more subscriptions. + // Either way, the signal just hasn't propagated to our goroutine yet. 
+ <-bc.wait + continue + } + + response, err := bc.fetchNewMessages() + + if err != nil { + Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) + bc.abort(err) + return + } + + bc.acks.Add(len(bc.subscriptions)) + for child := range bc.subscriptions { + child.feeder <- response + } + bc.acks.Wait() + bc.handleResponses() + } +} + +func (bc *brokerConsumer) handleResponses() { + // handles the response codes left for us by our subscriptions, and abandons ones that have been closed + for child := range bc.subscriptions { + select { + case <-child.dying: + Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + close(child.trigger) + delete(bc.subscriptions, child) + default: + result := child.responseResult + child.responseResult = nil + + switch result { + case nil: + break + case errTimedOut: + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", + bc.broker.ID(), child.topic, child.partition) + delete(bc.subscriptions, child) + case ErrOffsetOutOfRange: + // there's no point in retrying this it will just fail the same way again + // shut it down and force the user to choose what to do + child.sendError(result) + Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) + close(child.trigger) + delete(bc.subscriptions, child) + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, 
child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscription := range bc.newSubscriptions { + for _, child := range newSubscription { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + + for child := range bc.subscriptions { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + + return bc.broker.Fetch(request) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go new file mode 100644 index 000000000..9b8fcd74e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go @@ -0,0 +1,22 @@ +package sarama + +type ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + return pe.putString(r.ConsumerGroup) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder) (err error) { + r.ConsumerGroup, err = pd.getString() + return err +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go new file mode 100644 index 000000000..4509631a0 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go @@ -0,0 +1,19 @@ +package sarama + +import "testing" + +var ( + consumerMetadataRequestEmpty = []byte{ + 0x00, 0x00} + + consumerMetadataRequestString = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'} +) + +func TestConsumerMetadataRequest(t *testing.T) { + request := new(ConsumerMetadataRequest) + testRequest(t, "empty string", request, consumerMetadataRequestEmpty) + + request.ConsumerGroup = "foobar" + testRequest(t, "with string", request, consumerMetadataRequestString) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go new file mode 100644 index 000000000..d6b5614b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go @@ -0,0 +1,73 @@ +package sarama + +import ( + "net" + "strconv" +) + +type ConsumerMetadataResponse struct { + Err KError + Coordinator *Broker + CoordinatorID int32 // deprecated: use Coordinator.ID() + CoordinatorHost string // deprecated: use Coordinator.Addr() + CoordinatorPort int32 // deprecated: use Coordinator.Addr() +} + +func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(tmp) + + coordinator := new(Broker) + if err := coordinator.decode(pd); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + r.Coordinator = coordinator + + // this can all go away in 2.0, but we have to fill in deprecated fields to maintain + // backwards compatibility + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + r.CoordinatorID = r.Coordinator.ID() + r.CoordinatorHost = host + r.CoordinatorPort = int32(port) + + return nil +} + +func (r 
*ConsumerMetadataResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if r.Coordinator != nil { + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + pe.putInt32(r.Coordinator.ID()) + if err := pe.putString(host); err != nil { + return err + } + pe.putInt32(int32(port)) + return nil + } + pe.putInt32(r.CoordinatorID) + if err := pe.putString(r.CoordinatorHost); err != nil { + return err + } + pe.putInt32(r.CoordinatorPort) + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go new file mode 100644 index 000000000..b748784d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go @@ -0,0 +1,35 @@ +package sarama + +import "testing" + +var ( + consumerMetadataResponseError = []byte{ + 0x00, 0x0E, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + consumerMetadataResponseSuccess = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0xAB, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0xCC, 0xDD} +) + +func TestConsumerMetadataResponseError(t *testing.T) { + response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} + testResponse(t, "error", &response, consumerMetadataResponseError) +} + +func TestConsumerMetadataResponseSuccess(t *testing.T) { + broker := NewBroker("foo:52445") + broker.id = 0xAB + response := ConsumerMetadataResponse{ + Coordinator: broker, + CoordinatorID: 0xAB, + CoordinatorHost: "foo", + CoordinatorPort: 0xCCDD, + Err: ErrNoError, + } + testResponse(t, "success", &response, consumerMetadataResponseSuccess) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go new file mode 100644 index 000000000..df3af07ff 
--- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go @@ -0,0 +1,844 @@ +package sarama + +import ( + "sync" + "testing" + "time" +) + +var testMsg = StringEncoder("Foo") + +// If a particular offset is provided then messages are consumed starting from +// that offset. +func TestConsumerOffsetManual(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + + mockFetchResponse := newMockFetchResponse(t, 1) + for i := 0; i < 10; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg) + } + + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 2345), + "FetchRequest": mockFetchResponse, + }) + + // When + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + consumer, err := master.ConsumePartition("my_topic", 0, 1234) + if err != nil { + t.Fatal(err) + } + + // Then: messages starting from offset 1234 are consumed. + for i := 0; i < 10; i++ { + select { + case message := <-consumer.Messages(): + assertMessageOffset(t, message, int64(i+1234)) + case err := <-consumer.Errors(): + t.Error(err) + } + } + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// If `OffsetNewest` is passed as the initial offset then the first consumed +// message is indeed corresponds to the offset that broker claims to be the +// newest in its metadata response. +func TestConsumerOffsetNewest(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). 
+ SetOffset("my_topic", 0, OffsetNewest, 10). + SetOffset("my_topic", 0, OffsetOldest, 7), + "FetchRequest": newMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 9, testMsg). + SetMessage("my_topic", 0, 10, testMsg). + SetMessage("my_topic", 0, 11, testMsg). + SetHighWaterMark("my_topic", 0, 14), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) + if err != nil { + t.Fatal(err) + } + + // Then + assertMessageOffset(t, <-consumer.Messages(), 10) + if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 { + t.Errorf("Expected high water mark offset 14, found %d", hwmo) + } + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// It is possible to close a partition consumer and create the same anew. +func TestConsumerRecreate(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": newMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 10, testMsg), + }) + + c, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, 10) + if err != nil { + t.Fatal(err) + } + assertMessageOffset(t, <-pc.Messages(), 10) + + // When + safeClose(t, pc) + pc, err = c.ConsumePartition("my_topic", 0, 10) + if err != nil { + t.Fatal(err) + } + + // Then + assertMessageOffset(t, <-pc.Messages(), 10) + + safeClose(t, pc) + safeClose(t, c) + broker0.Close() +} + +// An attempt to consume the same partition twice should fail. 
+func TestConsumerDuplicate(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": newMockFetchResponse(t, 1), + }) + + config := NewConfig() + config.ChannelBufferSize = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc1, err := c.ConsumePartition("my_topic", 0, 0) + if err != nil { + t.Fatal(err) + } + + // When + pc2, err := c.ConsumePartition("my_topic", 0, 0) + + // Then + if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") { + t.Fatal("A partition cannot be consumed twice at the same time") + } + + safeClose(t, pc1) + safeClose(t, c) + broker0.Close() +} + +// If consumer fails to refresh metadata it keeps retrying with frequency +// specified by `Config.Consumer.Retry.Backoff`. +func TestConsumerLeaderRefreshError(t *testing.T) { + // Given + broker0 := newMockBroker(t, 100) + + // Stage 1: my_topic/0 served by broker0 + Logger.Printf(" STAGE 1") + + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 123). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": newMockFetchResponse(t, 1). 
+ SetMessage("my_topic", 0, 123, testMsg), + }) + + config := NewConfig() + config.Net.ReadTimeout = 100 * time.Millisecond + config.Consumer.Retry.Backoff = 200 * time.Millisecond + config.Consumer.Return.Errors = true + config.Metadata.Retry.Max = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + assertMessageOffset(t, <-pc.Messages(), 123) + + // Stage 2: broker0 says that it is no longer the leader for my_topic/0, + // but the requests to retrieve metadata fail with network timeout. + Logger.Printf(" STAGE 2") + + fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) + + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": newMockWrapper(fetchResponse2), + }) + + if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { + t.Errorf("Unexpected error: %v", consErr.Err) + } + + // Stage 3: finally the metadata returned by broker0 tells that broker1 is + // a new leader for my_topic/0. Consumption resumes. + + Logger.Printf(" STAGE 3") + + broker1 := newMockBroker(t, 101) + + broker1.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": newMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 124, testMsg), + }) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetBroker(broker1.Addr(), broker1.BrokerID()). + SetLeader("my_topic", 0, broker1.BrokerID()), + }) + + assertMessageOffset(t, <-pc.Messages(), 124) + + safeClose(t, pc) + safeClose(t, c) + broker1.Close() + broker0.Close() +} + +func TestConsumerInvalidTopic(t *testing.T) { + // Given + broker0 := newMockBroker(t, 100) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). 
+ SetBroker(broker0.Addr(), broker0.BrokerID()), + }) + + c, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + + // Then + if pc != nil || err != ErrUnknownTopicOrPartition { + t.Errorf("Should fail with, err=%v", err) + } + + safeClose(t, c) + broker0.Close() +} + +// Nothing bad happens if a partition consumer that has no leader assigned at +// the moment is closed. +func TestConsumerClosePartitionWithoutLeader(t *testing.T) { + // Given + broker0 := newMockBroker(t, 100) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 123). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": newMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 123, testMsg), + }) + + config := NewConfig() + config.Net.ReadTimeout = 100 * time.Millisecond + config.Consumer.Retry.Backoff = 100 * time.Millisecond + config.Consumer.Return.Errors = true + config.Metadata.Retry.Max = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + assertMessageOffset(t, <-pc.Messages(), 123) + + // broker0 says that it is no longer the leader for my_topic/0, but the + // requests to retrieve metadata fail with network timeout. 
+ fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) + + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": newMockWrapper(fetchResponse2), + }) + + // When + if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { + t.Errorf("Unexpected error: %v", consErr.Err) + } + + // Then: the partition consumer can be closed without any problem. + safeClose(t, pc) + safeClose(t, c) + broker0.Close() +} + +// If the initial offset passed on partition consumer creation is out of the +// actual offset range for the partition, then the partition consumer stops +// immediately closing its output channels. +func TestConsumerShutsDownOutOfRange(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + broker0.SetHandler(func(req *request) (res encoder) { + switch reqBody := req.body.(type) { + case *MetadataRequest: + return newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()). + For(reqBody) + case *OffsetRequest: + return newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 7). + For(reqBody) + case *FetchRequest: + fetchResponse := new(FetchResponse) + fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) + return fetchResponse + } + return nil + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 101) + if err != nil { + t.Fatal(err) + } + + // Then: consumer should shut down closing its messages and errors channels. + if _, ok := <-consumer.Messages(); ok { + t.Error("Expected the consumer to shut down") + } + safeClose(t, consumer) + + safeClose(t, master) + broker0.Close() +} + +// If a fetch response contains messages with offsets that are smaller then +// requested, then such messages are ignored. 
+func TestConsumerExtraOffsets(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + called := 0 + broker0.SetHandler(func(req *request) (res encoder) { + switch req.body.(type) { + case *MetadataRequest: + return newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body) + case *OffsetRequest: + return newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body) + case *FetchRequest: + fetchResponse := &FetchResponse{} + called++ + if called > 1 { + fetchResponse.AddError("my_topic", 0, ErrNoError) + return fetchResponse + } + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1) + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2) + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3) + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4) + return fetchResponse + } + return nil + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 3) + if err != nil { + t.Fatal(err) + } + + // Then: messages with offsets 1 and 2 are not returned even though they + // are present in the response. + assertMessageOffset(t, <-consumer.Messages(), 3) + assertMessageOffset(t, <-consumer.Messages(), 4) + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// It is fine if offsets of fetched messages are not sequential (although +// strictly increasing!). +func TestConsumerNonSequentialOffsets(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + called := 0 + broker0.SetHandler(func(req *request) (res encoder) { + switch req.body.(type) { + case *MetadataRequest: + return newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body) + case *OffsetRequest: + return newMockOffsetResponse(t). 
+ SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body) + case *FetchRequest: + called++ + fetchResponse := &FetchResponse{} + if called > 1 { + fetchResponse.AddError("my_topic", 0, ErrNoError) + return fetchResponse + } + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5) + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7) + fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11) + return fetchResponse + } + return nil + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 3) + if err != nil { + t.Fatal(err) + } + + // Then: messages with offsets 1 and 2 are not returned even though they + // are present in the response. + assertMessageOffset(t, <-consumer.Messages(), 5) + assertMessageOffset(t, <-consumer.Messages(), 7) + assertMessageOffset(t, <-consumer.Messages(), 11) + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// If leadership for a partition is changing then consumer resolves the new +// leader and switches to it. +func TestConsumerRebalancingMultiplePartitions(t *testing.T) { + // initial setup + seedBroker := newMockBroker(t, 10) + leader0 := newMockBroker(t, 0) + leader1 := newMockBroker(t, 1) + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(leader0.Addr(), leader0.BrokerID()). + SetBroker(leader1.Addr(), leader1.BrokerID()). + SetLeader("my_topic", 0, leader0.BrokerID()). + SetLeader("my_topic", 1, leader1.BrokerID()), + }) + + mockOffsetResponse1 := newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 1000). + SetOffset("my_topic", 1, OffsetOldest, 0). 
+ SetOffset("my_topic", 1, OffsetNewest, 1000) + leader0.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse1, + "FetchRequest": newMockFetchResponse(t, 1), + }) + leader1.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse1, + "FetchRequest": newMockFetchResponse(t, 1), + }) + + // launch test goroutines + config := NewConfig() + config.Consumer.Retry.Backoff = 50 + master, err := NewConsumer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // we expect to end up (eventually) consuming exactly ten messages on each partition + var wg sync.WaitGroup + for i := int32(0); i < 2; i++ { + consumer, err := master.ConsumePartition("my_topic", i, 0) + if err != nil { + t.Error(err) + } + + go func(c PartitionConsumer) { + for err := range c.Errors() { + t.Error(err) + } + }(consumer) + + wg.Add(1) + go func(partition int32, c PartitionConsumer) { + for i := 0; i < 10; i++ { + message := <-consumer.Messages() + if message.Offset != int64(i) { + t.Error("Incorrect message offset!", i, partition, message.Offset) + } + if message.Partition != partition { + t.Error("Incorrect message partition!") + } + } + safeClose(t, consumer) + wg.Done() + }(i, consumer) + } + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 1") + // Stage 1: + // * my_topic/0 -> leader0 serves 4 messages + // * my_topic/1 -> leader1 serves 0 messages + + mockFetchResponse := newMockFetchResponse(t, 1) + for i := 0; i < 4; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) + } + leader0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse, + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 2") + // Stage 2: + // * leader0 says that it is no longer serving my_topic/0 + // * seedBroker tells that leader1 is serving my_topic/0 now + + // seed broker tells that the new partition 0 leader is leader1 + seedBroker.SetHandlerByMap(map[string]MockResponse{ + 
"MetadataRequest": newMockMetadataResponse(t). + SetLeader("my_topic", 0, leader1.BrokerID()). + SetLeader("my_topic", 1, leader1.BrokerID()), + }) + + // leader0 says no longer leader of partition 0 + leader0.SetHandler(func(req *request) (res encoder) { + switch req.body.(type) { + case *FetchRequest: + fetchResponse := new(FetchResponse) + fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) + return fetchResponse + } + return nil + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 3") + // Stage 3: + // * my_topic/0 -> leader1 serves 3 messages + // * my_topic/1 -> leader1 server 8 messages + + // leader1 provides 3 message on partition 0, and 8 messages on partition 1 + mockFetchResponse2 := newMockFetchResponse(t, 2) + for i := 4; i < 7; i++ { + mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) + } + for i := 0; i < 8; i++ { + mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) + } + leader1.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse2, + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 4") + // Stage 4: + // * my_topic/0 -> leader1 serves 3 messages + // * my_topic/1 -> leader1 tells that it is no longer the leader + // * seedBroker tells that leader0 is a new leader for my_topic/1 + + // metadata assigns 0 to leader1 and 1 to leader0 + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetLeader("my_topic", 0, leader1.BrokerID()). + SetLeader("my_topic", 1, leader0.BrokerID()), + }) + + // leader1 provides three more messages on partition0, says no longer leader of partition1 + mockFetchResponse3 := newMockFetchResponse(t, 3). + SetMessage("my_topic", 0, int64(7), testMsg). + SetMessage("my_topic", 0, int64(8), testMsg). 
+ SetMessage("my_topic", 0, int64(9), testMsg) + leader1.SetHandler(func(req *request) (res encoder) { + switch reqBody := req.body.(type) { + case *FetchRequest: + res := mockFetchResponse3.For(reqBody).(*FetchResponse) + res.AddError("my_topic", 1, ErrNotLeaderForPartition) + return res + + } + return nil + }) + + // leader0 provides two messages on partition 1 + mockFetchResponse4 := newMockFetchResponse(t, 2) + for i := 8; i < 10; i++ { + mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) + } + leader0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse4, + }) + + wg.Wait() + safeClose(t, master) + leader1.Close() + leader0.Close() + seedBroker.Close() +} + +// When two partitions have the same broker as the leader, if one partition +// consumer channel buffer is full then that does not affect the ability to +// read messages by the other consumer. +func TestConsumerInterleavedClose(t *testing.T) { + // Given + broker0 := newMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()). + SetLeader("my_topic", 1, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 1000). + SetOffset("my_topic", 0, OffsetNewest, 1100). + SetOffset("my_topic", 1, OffsetOldest, 2000). + SetOffset("my_topic", 1, OffsetNewest, 2100), + "FetchRequest": newMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 1000, testMsg). + SetMessage("my_topic", 0, 1001, testMsg). + SetMessage("my_topic", 0, 1002, testMsg). 
+ SetMessage("my_topic", 1, 2000, testMsg), + }) + + config := NewConfig() + config.ChannelBufferSize = 0 + master, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + c0, err := master.ConsumePartition("my_topic", 0, 1000) + if err != nil { + t.Fatal(err) + } + + c1, err := master.ConsumePartition("my_topic", 1, 2000) + if err != nil { + t.Fatal(err) + } + + // When/Then: we can read from partition 0 even if nobody reads from partition 1 + assertMessageOffset(t, <-c0.Messages(), 1000) + assertMessageOffset(t, <-c0.Messages(), 1001) + assertMessageOffset(t, <-c0.Messages(), 1002) + + safeClose(t, c1) + safeClose(t, c0) + safeClose(t, master) + broker0.Close() +} + +func TestConsumerBounceWithReferenceOpen(t *testing.T) { + broker0 := newMockBroker(t, 0) + broker0Addr := broker0.Addr() + broker1 := newMockBroker(t, 1) + + mockMetadataResponse := newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetBroker(broker1.Addr(), broker1.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()). + SetLeader("my_topic", 1, broker1.BrokerID()) + + mockOffsetResponse := newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 1000). + SetOffset("my_topic", 0, OffsetNewest, 1100). + SetOffset("my_topic", 1, OffsetOldest, 2000). 
+ SetOffset("my_topic", 1, OffsetNewest, 2100) + + mockFetchResponse := newMockFetchResponse(t, 1) + for i := 0; i < 10; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) + mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) + } + + broker0.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse, + "FetchRequest": mockFetchResponse, + }) + broker1.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": mockMetadataResponse, + "OffsetRequest": mockOffsetResponse, + "FetchRequest": mockFetchResponse, + }) + + config := NewConfig() + config.Consumer.Return.Errors = true + config.Consumer.Retry.Backoff = 100 * time.Millisecond + config.ChannelBufferSize = 1 + master, err := NewConsumer([]string{broker1.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + c0, err := master.ConsumePartition("my_topic", 0, 1000) + if err != nil { + t.Fatal(err) + } + + c1, err := master.ConsumePartition("my_topic", 1, 2000) + if err != nil { + t.Fatal(err) + } + + // read messages from both partition to make sure that both brokers operate + // normally. + assertMessageOffset(t, <-c0.Messages(), 1000) + assertMessageOffset(t, <-c1.Messages(), 2000) + + // Simulate broker shutdown. Note that metadata response does not change, + // that is the leadership does not move to another broker. So partition + // consumer will keep retrying to restore the connection with the broker. + broker0.Close() + + // Make sure that while the partition/0 leader is down, consumer/partition/1 + // is capable of pulling messages from broker1. + for i := 1; i < 7; i++ { + offset := (<-c1.Messages()).Offset + if offset != int64(2000+i) { + t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) + } + } + + // Bring broker0 back to service. 
+ broker0 = newMockBrokerAddr(t, 0, broker0Addr) + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse, + }) + + // Read the rest of messages from both partitions. + for i := 7; i < 10; i++ { + assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) + } + for i := 1; i < 10; i++ { + assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) + } + + select { + case <-c0.Errors(): + default: + t.Errorf("Partition consumer should have detected broker restart") + } + + safeClose(t, c1) + safeClose(t, c0) + safeClose(t, master) + broker0.Close() + broker1.Close() +} + +func TestConsumerOffsetOutOfRange(t *testing.T) { + // Given + broker0 := newMockBroker(t, 2) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": newMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": newMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 2345), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When/Then + if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + + safeClose(t, master) + broker0.Close() +} + +func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { + if msg.Offset != expectedOffset { + t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go 
b/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go new file mode 100644 index 000000000..f4fde18ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go @@ -0,0 +1,35 @@ +package sarama + +import ( + "encoding/binary" + "hash/crc32" +) + +// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. +type crc32Field struct { + startOffset int +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + + if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { + return PacketDecodingError{"CRC didn't match"} + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 000000000..b91efaa0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go @@ -0,0 +1,62 @@ +package sarama + +import "fmt" + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +// Encode takes an Encoder and turns it into bytes. 
+func encode(e encoder) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. +func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go b/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go new file mode 100644 index 000000000..70f2b9bfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go @@ -0,0 +1,146 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. 
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. +var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") + +// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max +var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") + +// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, +// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. 
+type PacketEncodingError struct { + Info string +} + +func (err PacketEncodingError) Error() string { + return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) +} + +// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. +// This can be a bad CRC or length field, or any other invalid value. +type PacketDecodingError struct { + Info string +} + +func (err PacketDecodingError) Error() string { + return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) +} + +// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) +// when the specified configuration is invalid. +type ConfigurationError string + +func (err ConfigurationError) Error() string { + return "kafka: invalid configuration (" + string(err) + ")" +} + +// KError is the type of error that can be returned directly by the Kafka broker. +// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes +type KError int16 + +// Numeric error codes returned by the Kafka server. 
+const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." 
+ case ErrRequestTimedOut: + return "kafka server: Request exceeded the user-specified time limit in the request." + case ErrBrokerNotAvailable: + return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" + case ErrReplicaNotAvailable: + return "kafka server: Replica infomation not available, one or more brokers are down." + case ErrMessageSizeTooLarge: + return "kafka server: Message was too large, server rejected it to avoid allocation error." + case ErrStaleControllerEpochCode: + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." + case ErrOffsetMetadataTooLarge: + return "kafka server: Specified a string larger than the configured maximum for offset metadata." + case ErrOffsetsLoadInProgress: + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + case ErrConsumerCoordinatorNotAvailable: + return "kafka server: Offset's topic has not yet been created." + case ErrNotCoordinatorForConsumer: + return "kafka server: Request was for a consumer group that is not coordinated by this broker." + case ErrInvalidTopic: + return "kafka server: The request attempted to perform an operation on an invalid topic." + case ErrMessageSetSizeTooLarge: + return "kafka server: The request included message batch larger than the configured segment size on the server." + case ErrNotEnoughReplicas: + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + case ErrNotEnoughReplicasAfterAppend: + return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." + } + + return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md new file mode 100644 index 000000000..b6588051e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md @@ -0,0 +1,9 @@ +# Sarama examples + +This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarams's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama) + +In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version. + +#### HTTP server + +[http_server](./http_server) is a simple HTTP server uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both. 
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore new file mode 100644 index 000000000..9f6ed425f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore @@ -0,0 +1,2 @@ +http_server +http_server.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md new file mode 100644 index 000000000..5ff2bc253 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md @@ -0,0 +1,7 @@ +# HTTP server example + +This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and send a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background. + +If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response, while the message is being produced in the background. + +One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together. 
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go new file mode 100644 index 000000000..03e47b6b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go @@ -0,0 +1,246 @@ +package main + +import ( + "github.com/Shopify/sarama" + + "crypto/tls" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" +) + +var ( + addr = flag.String("addr", ":8080", "The address to bind to") + brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") + verbose = flag.Bool("verbose", false, "Turn on Sarama logging") + certFile = flag.String("certificate", "", "The optional certificate file for client authentication") + keyFile = flag.String("key", "", "The optional key file for client authentication") + caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") + verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain") +) + +func main() { + flag.Parse() + + if *verbose { + sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) + } + + if *brokers == "" { + flag.PrintDefaults() + os.Exit(1) + } + + brokerList := strings.Split(*brokers, ",") + log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) + + server := &Server{ + DataCollector: newDataCollector(brokerList), + AccessLogProducer: newAccessLogProducer(brokerList), + } + defer func() { + if err := server.Close(); err != nil { + log.Println("Failed to close server", err) + } + }() + + log.Fatal(server.Run(*addr)) +} + +func createTlsConfiguration() (t *tls.Config) { + if *certFile != "" && *keyFile != "" && *caFile != "" { + cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) + if err != nil { + log.Fatal(err) + } + + caCert, err := 
ioutil.ReadFile(*caFile) + if err != nil { + log.Fatal(err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + t = &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: *verifySsl, + } + } + // will be nil by default if nothing is provided + return t +} + +type Server struct { + DataCollector sarama.SyncProducer + AccessLogProducer sarama.AsyncProducer +} + +func (s *Server) Close() error { + if err := s.DataCollector.Close(); err != nil { + log.Println("Failed to shut down data collector cleanly", err) + } + + if err := s.AccessLogProducer.Close(); err != nil { + log.Println("Failed to shut down access log producer cleanly", err) + } + + return nil +} + +func (s *Server) Handler() http.Handler { + return s.withAccessLog(s.collectQueryStringData()) +} + +func (s *Server) Run(addr string) error { + httpServer := &http.Server{ + Addr: addr, + Handler: s.Handler(), + } + + log.Printf("Listening for requests on %s...\n", addr) + return httpServer.ListenAndServe() +} + +func (s *Server) collectQueryStringData() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + // We are not setting a message key, which means that all messages will + // be distributed randomly over the different partitions. + partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ + Topic: "important", + Value: sarama.StringEncoder(r.URL.RawQuery), + }) + + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Failed to store your data:, %s", err) + } else { + // The tuple (topic, partition, offset) can be used as a unique identifier + // for a message in a Kafka cluster. 
+ fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) + } + }) +} + +type accessLogEntry struct { + Method string `json:"method"` + Host string `json:"host"` + Path string `json:"path"` + IP string `json:"ip"` + ResponseTime float64 `json:"response_time"` + + encoded []byte + err error +} + +func (ale *accessLogEntry) ensureEncoded() { + if ale.encoded == nil && ale.err == nil { + ale.encoded, ale.err = json.Marshal(ale) + } +} + +func (ale *accessLogEntry) Length() int { + ale.ensureEncoded() + return len(ale.encoded) +} + +func (ale *accessLogEntry) Encode() ([]byte, error) { + ale.ensureEncoded() + return ale.encoded, ale.err +} + +func (s *Server) withAccessLog(next http.Handler) http.Handler { + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + started := time.Now() + + next.ServeHTTP(w, r) + + entry := &accessLogEntry{ + Method: r.Method, + Host: r.Host, + Path: r.RequestURI, + IP: r.RemoteAddr, + ResponseTime: float64(time.Since(started)) / float64(time.Second), + } + + // We will use the client's IP address as key. This will cause + // all the access log entries of the same IP address to end up + // on the same partition. + s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ + Topic: "access_log", + Key: sarama.StringEncoder(r.RemoteAddr), + Value: entry, + } + }) +} + +func newDataCollector(brokerList []string) sarama.SyncProducer { + + // For the data collector, we are looking for strong consistency semantics. + // Because we don't change the flush settings, sarama will try to produce messages + // as fast as possible to keep latency low. 
+ config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message + config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message + tlsConfig := createTlsConfiguration() + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + // On the broker side, you may want to change the following settings to get + // stronger consistency guarantees: + // - For your broker, set `unclean.leader.election.enable` to false + // - For the topic, you could increase `min.insync.replicas`. + + producer, err := sarama.NewSyncProducer(brokerList, config) + if err != nil { + log.Fatalln("Failed to start Sarama producer:", err) + } + + return producer +} + +func newAccessLogProducer(brokerList []string) sarama.AsyncProducer { + + // For the access log, we are looking for AP semantics, with high throughput. + // By creating batches of compressed messages, we reduce network I/O at a cost of more latency. + config := sarama.NewConfig() + tlsConfig := createTlsConfiguration() + if tlsConfig != nil { + config.Net.TLS.Enable = true + config.Net.TLS.Config = tlsConfig + } + config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack + config.Producer.Compression = sarama.CompressionSnappy // Compress messages + config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms + + producer, err := sarama.NewAsyncProducer(brokerList, config) + if err != nil { + log.Fatalln("Failed to start Sarama producer:", err) + } + + // We will just log to STDOUT if we're not able to produce messages. + // Note: messages will only be returned here after all retry attempts are exhausted. 
+ go func() { + for err := range producer.Errors() { + log.Println("Failed to write access log entry:", err) + } + }() + + return producer +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go new file mode 100644 index 000000000..7b2451e28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go @@ -0,0 +1,109 @@ +package main + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Shopify/sarama" + "github.com/Shopify/sarama/mocks" +) + +// In normal operation, we expect one access log entry, +// and one data collector entry. Let's assume both will succeed. +// We should return a HTTP 200 status. +func TestCollectSuccessfully(t *testing.T) { + dataCollectorMock := mocks.NewSyncProducer(t, nil) + dataCollectorMock.ExpectSendMessageAndSucceed() + + accessLogProducerMock := mocks.NewAsyncProducer(t, nil) + accessLogProducerMock.ExpectInputAndSucceed() + + // Now, use dependency injection to use the mocks. + s := &Server{ + DataCollector: dataCollectorMock, + AccessLogProducer: accessLogProducerMock, + } + + // The Server's Close call is important; it will call Close on + // the two mock producers, which will then validate whether all + // expectations are resolved. + defer safeClose(t, s) + + req, err := http.NewRequest("GET", "http://example.com/?data", nil) + if err != nil { + t.Fatal(err) + } + res := httptest.NewRecorder() + s.Handler().ServeHTTP(res, req) + + if res.Code != 200 { + t.Errorf("Expected HTTP status 200, found %d", res.Code) + } + + if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" { + t.Error("Unexpected response body", res.Body) + } +} + +// Now, let's see if we handle the case of not being able to produce +// to the data collector properly. In this case we should return a 500 status. 
+func TestCollectionFailure(t *testing.T) { + dataCollectorMock := mocks.NewSyncProducer(t, nil) + dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut) + + accessLogProducerMock := mocks.NewAsyncProducer(t, nil) + accessLogProducerMock.ExpectInputAndSucceed() + + s := &Server{ + DataCollector: dataCollectorMock, + AccessLogProducer: accessLogProducerMock, + } + defer safeClose(t, s) + + req, err := http.NewRequest("GET", "http://example.com/?data", nil) + if err != nil { + t.Fatal(err) + } + res := httptest.NewRecorder() + s.Handler().ServeHTTP(res, req) + + if res.Code != 500 { + t.Errorf("Expected HTTP status 500, found %d", res.Code) + } +} + +// We don't expect any data collector calls because the path is wrong, +// so we are not setting any expectations on the dataCollectorMock. It +// will still generate an access log entry though. +func TestWrongPath(t *testing.T) { + dataCollectorMock := mocks.NewSyncProducer(t, nil) + + accessLogProducerMock := mocks.NewAsyncProducer(t, nil) + accessLogProducerMock.ExpectInputAndSucceed() + + s := &Server{ + DataCollector: dataCollectorMock, + AccessLogProducer: accessLogProducerMock, + } + defer safeClose(t, s) + + req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil) + if err != nil { + t.Fatal(err) + } + res := httptest.NewRecorder() + + s.Handler().ServeHTTP(res, req) + + if res.Code != 404 { + t.Errorf("Expected HTTP status 404, found %d", res.Code) + } +} + +func safeClose(t *testing.T, o io.Closer) { + if err := o.Close(); err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 000000000..3c00fad65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,123 @@ +package sarama + +type fetchRequestBlock struct { + fetchOffset int64 + maxBytes int32 +} + +func (f *fetchRequestBlock) encode(pe 
packetEncoder) error { + pe.putInt64(f.fetchOffset) + pe.putInt32(f.maxBytes) + return nil +} + +func (f *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if f.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if f.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + blocks map[string]map[int32]*fetchRequestBlock +} + +func (f *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(f.MaxWaitTime) + pe.putInt32(f.MinBytes) + err = pe.putArrayLength(len(f.blocks)) + if err != nil { + return err + } + for topic, blocks := range f.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe) + if err != nil { + return err + } + } + } + return nil +} + +func (f *FetchRequest) decode(pd packetDecoder) (err error) { + if _, err = pd.getInt32(); err != nil { + return err + } + if f.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if f.MinBytes, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + f.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + f.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd); err != nil { + return nil + } + f.blocks[topic][partition] = fetchBlock + } + } + return nil +} + +func (f *FetchRequest) 
key() int16 { + return 1 +} + +func (f *FetchRequest) version() int16 { + return 0 +} + +func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if f.blocks == nil { + f.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if f.blocks[topic] == nil { + f.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + + f.blocks[topic][partitionID] = tmp +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go new file mode 100644 index 000000000..32c083c7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go @@ -0,0 +1,34 @@ +package sarama + +import "testing" + +var ( + fetchRequestNoBlocks = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + fetchRequestWithProperties = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, + 0x00, 0x00, 0x00, 0x00} + + fetchRequestOneBlock = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} +) + +func TestFetchRequest(t *testing.T) { + request := new(FetchRequest) + testRequest(t, "no blocks", request, fetchRequestNoBlocks) + + request.MaxWaitTime = 0x20 + request.MinBytes = 0xEF + testRequest(t, "with properties", request, fetchRequestWithProperties) + + request.MaxWaitTime = 0 + request.MinBytes = 0 + request.AddBlock("topic", 0x12, 0x34, 0x56) + testRequest(t, "one block", request, fetchRequestOneBlock) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go 
b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go new file mode 100644 index 000000000..1ac543921 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go @@ -0,0 +1,173 @@ +package sarama + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + MsgSet MessageSet +} + +func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pr.Err = KError(tmp) + + pr.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + msgSetSize, err := pd.getInt32() + if err != nil { + return err + } + + msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + if err != nil { + return err + } + err = (&pr.MsgSet).decode(msgSetDecoder) + + return err +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock +} + +func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(pr.Err)) + + pe.putInt64(pr.HighWaterMarkOffset) + + pe.push(&lengthField{}) + err = pr.MsgSet.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (fr *FetchResponse) decode(pd packetDecoder) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + fr.Blocks[name][id] = block + } + } + + return nil +} + +func (fr *FetchResponse) encode(pe packetEncoder) (err error) { + err = pe.putArrayLength(len(fr.Blocks)) + if err != nil { + return err + } + 
+ for topic, partitions := range fr.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe) + if err != nil { + return err + } + } + + } + return nil +} + +func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if fr.Blocks == nil { + return nil + } + + if fr.Blocks[topic] == nil { + return nil + } + + return fr.Blocks[topic][partition] +} + +func (fr *FetchResponse) AddError(topic string, partition int32, err KError) { + if fr.Blocks == nil { + fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := fr.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + fr.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + if fr.Blocks == nil { + fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := fr.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + fr.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + var kb []byte + var vb []byte + if key != nil { + kb, _ = key.Encode() + } + if value != nil { + vb, _ = value.Encode() + } + msg := &Message{Key: kb, Value: vb} + msgBlock := &MessageBlock{Msg: msg, Offset: offset} + frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go new file mode 100644 index 000000000..a23a05340 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go @@ -0,0 +1,84 @@ +package sarama + +import ( + "bytes" + "testing" +) + +var ( + emptyFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} + + oneMessageFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x05, + 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, + 0x00, 0x00, 0x00, 0x1C, + // messageSet + 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, + // message + 0x23, 0x96, 0x4a, 0xf7, // CRC + 0x00, + 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} +) + +func TestEmptyFetchResponse(t *testing.T) { + response := FetchResponse{} + testDecodable(t, "empty", &response, emptyFetchResponse) + + if len(response.Blocks) != 0 { + t.Error("Decoding produced topic blocks where there were none.") + } + +} + +func TestOneMessageFetchResponse(t *testing.T) { + response := FetchResponse{} + testDecodable(t, "one message", &response, oneMessageFetchResponse) + + if len(response.Blocks) != 1 { + t.Fatal("Decoding produced incorrect number of topic blocks.") + } + + if len(response.Blocks["topic"]) != 1 { + t.Fatal("Decoding produced incorrect number of partition blocks for topic.") + } + + block := response.GetBlock("topic", 5) + if block == nil { + t.Fatal("GetBlock didn't return block.") + } + if block.Err != ErrOffsetOutOfRange { + t.Error("Decoding didn't produce correct error code.") + } + if block.HighWaterMarkOffset != 0x10101010 { + t.Error("Decoding didn't produce correct high water mark offset.") + } + if block.MsgSet.PartialTrailingMessage { + t.Error("Decoding detected a partial trailing message where there wasn't one.") + } + + if len(block.MsgSet.Messages) != 1 { + t.Fatal("Decoding produced incorrect number of messages.") + } + msgBlock := block.MsgSet.Messages[0] + if msgBlock.Offset != 0x550000 { + t.Error("Decoding produced incorrect message 
offset.") + } + msg := msgBlock.Msg + if msg.Codec != CompressionNone { + t.Error("Decoding produced incorrect message compression.") + } + if msg.Key != nil { + t.Error("Decoding produced message key where there was none.") + } + if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { + t.Error("Decoding produced incorrect message value.") + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go new file mode 100644 index 000000000..9e8e32968 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go @@ -0,0 +1,90 @@ +package sarama + +import ( + "fmt" + "testing" + "time" +) + +func TestFuncConnectionFailure(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + Proxies["kafka1"].Enabled = false + SaveProxy(t, "kafka1") + + config := NewConfig() + config.Metadata.Retry.Max = 1 + + _, err := NewClient([]string{kafkaBrokers[0]}, config) + if err != ErrOutOfBrokers { + t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) + } +} + +func TestFuncClientMetadata(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Metadata.Retry.Backoff = 10 * time.Millisecond + client, err := NewClient(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + partitions, err := client.Partitions("test.4") + if err != nil { + t.Error(err) + } + if len(partitions) 
!= 4 { + t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) + } + + partitions, err = client.Partitions("test.1") + if err != nil { + t.Error(err) + } + if len(partitions) != 1 { + t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions) + } + + safeClose(t, client) +} + +func TestFuncClientCoordinator(t *testing.T) { + checkKafkaVersion(t, "0.8.2") + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + client, err := NewClient(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) + if err != nil { + t.Error(err) + } + + if connected, err := broker.Connected(); !connected || err != nil { + t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr()) + } + } + + safeClose(t, client) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go new file mode 100644 index 000000000..ab8433109 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go @@ -0,0 +1,61 @@ +package sarama + +import ( + "math" + "testing" +) + +func TestFuncConsumerOffsetOutOfRange(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + consumer, err := NewConsumer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange { + t.Error("Expected ErrOffsetOutOfRange, got:", err) + } + + if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange { + t.Error("Expected ErrOffsetOutOfRange, got:", err) + } + + safeClose(t, consumer) +} + +func TestConsumerHighWaterMarkOffset(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + p, err := NewSyncProducer(kafkaBrokers, nil) + if err != nil { + 
t.Fatal(err) + } + defer safeClose(t, p) + + _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) + if err != nil { + t.Fatal(err) + } + + c, err := NewConsumer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + defer safeClose(t, c) + + pc, err := c.ConsumePartition("test.1", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + <-pc.Messages() + + if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { + t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) + } + + safeClose(t, pc) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go new file mode 100644 index 000000000..1504e7600 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go @@ -0,0 +1,203 @@ +package sarama + +import ( + "fmt" + "sync" + "testing" + "time" +) + +const TestBatchSize = 1000 + +func TestFuncProducing(t *testing.T) { + config := NewConfig() + testProducingMessages(t, config) +} + +func TestFuncProducingGzip(t *testing.T) { + config := NewConfig() + config.Producer.Compression = CompressionGZIP + testProducingMessages(t, config) +} + +func TestFuncProducingSnappy(t *testing.T) { + config := NewConfig() + config.Producer.Compression = CompressionSnappy + testProducingMessages(t, config) +} + +func TestFuncProducingNoResponse(t *testing.T) { + config := NewConfig() + config.Producer.RequiredAcks = NoResponse + testProducingMessages(t, config) +} + +func TestFuncProducingFlushing(t *testing.T) { + config := NewConfig() + config.Producer.Flush.Messages = TestBatchSize / 8 + config.Producer.Flush.Frequency = 250 * time.Millisecond + testProducingMessages(t, config) +} + +func TestFuncMultiPartitionProduce(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config := NewConfig() + config.ChannelBufferSize = 20 + 
config.Producer.Flush.Frequency = 50 * time.Millisecond + config.Producer.Flush.Messages = 200 + config.Producer.Return.Successes = true + producer, err := NewSyncProducer(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(TestBatchSize) + + for i := 1; i <= TestBatchSize; i++ { + go func(i int) { + defer wg.Done() + msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} + if _, _, err := producer.SendMessage(msg); err != nil { + t.Error(i, err) + } + }(i) + } + + wg.Wait() + if err := producer.Close(); err != nil { + t.Error(err) + } +} + +func TestFuncProducingToInvalidTopic(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + producer, err := NewSyncProducer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + + if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + + safeClose(t, producer) +} + +func testProducingMessages(t *testing.T, config *Config) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config.Producer.Return.Successes = true + config.Consumer.Return.Errors = true + + client, err := NewClient(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + master, err := NewConsumerFromClient(client) + if err != nil { + t.Fatal(err) + } + consumer, err := master.ConsumePartition("test.1", 0, OffsetNewest) + if err != nil { + t.Fatal(err) + } + + producer, err := NewAsyncProducerFromClient(client) + if err != nil { + t.Fatal(err) + } + + expectedResponses := TestBatchSize + for i := 1; i <= TestBatchSize; { + msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} + select { 
+ case producer.Input() <- msg: + i++ + case ret := <-producer.Errors(): + t.Fatal(ret.Err) + case <-producer.Successes(): + expectedResponses-- + } + } + for expectedResponses > 0 { + select { + case ret := <-producer.Errors(): + t.Fatal(ret.Err) + case <-producer.Successes(): + expectedResponses-- + } + } + safeClose(t, producer) + + for i := 1; i <= TestBatchSize; i++ { + select { + case <-time.After(10 * time.Second): + t.Fatal("Not received any more events in the last 10 seconds.") + + case err := <-consumer.Errors(): + t.Error(err) + + case message := <-consumer.Messages(): + if string(message.Value) != fmt.Sprintf("testing %d", i) { + t.Fatalf("Unexpected message with index %d: %s", i, message.Value) + } + } + + } + safeClose(t, consumer) + safeClose(t, client) +} + +// Benchmarks + +func BenchmarkProducerSmall(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) +} +func BenchmarkProducerMedium(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) +} +func BenchmarkProducerLarge(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) +} +func BenchmarkProducerSmallSinglePartition(b *testing.B) { + benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) +} +func BenchmarkProducerMediumSnappy(b *testing.B) { + conf := NewConfig() + conf.Producer.Compression = CompressionSnappy + benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) +} + +func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { + setupFunctionalTest(b) + defer teardownFunctionalTest(b) + + producer, err := NewAsyncProducer(kafkaBrokers, conf) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + + for i := 1; i <= b.N; { + msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} + select { + case producer.Input() <- msg: + i++ + case ret := <-producer.Errors(): + b.Fatal(ret.Err) + } + } + safeClose(b, 
producer) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go new file mode 100644 index 000000000..171002ee9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go @@ -0,0 +1,146 @@ +package sarama + +import ( + "log" + "math/rand" + "net" + "os" + "strconv" + "strings" + "testing" + "time" + + toxiproxy "github.com/Shopify/toxiproxy/client" +) + +const ( + VagrantToxiproxy = "http://192.168.100.67:8474" + VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" + VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" +) + +var ( + kafkaAvailable, kafkaRequired bool + kafkaBrokers []string + + proxyClient *toxiproxy.Client + Proxies map[string]*toxiproxy.Proxy + ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"} + KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"} +) + +func init() { + if os.Getenv("DEBUG") == "true" { + Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) + } + + seed := time.Now().UTC().UnixNano() + if tmp := os.Getenv("TEST_SEED"); tmp != "" { + seed, _ = strconv.ParseInt(tmp, 0, 64) + } + Logger.Println("Using random seed:", seed) + rand.Seed(seed) + + proxyAddr := os.Getenv("TOXIPROXY_ADDR") + if proxyAddr == "" { + proxyAddr = VagrantToxiproxy + } + proxyClient = toxiproxy.NewClient(proxyAddr) + + kafkaPeers := os.Getenv("KAFKA_PEERS") + if kafkaPeers == "" { + kafkaPeers = VagrantKafkaPeers + } + kafkaBrokers = strings.Split(kafkaPeers, ",") + + if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { + if err = c.Close(); err == nil { + kafkaAvailable = true + } + } + + kafkaRequired = os.Getenv("CI") != "" +} + +func checkKafkaAvailability(t testing.TB) { + if !kafkaAvailable { + if kafkaRequired { + t.Fatalf("Kafka 
broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) + } else { + t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) + } + } +} + +func checkKafkaVersion(t testing.TB, requiredVersion string) { + kafkaVersion := os.Getenv("KAFKA_VERSION") + if kafkaVersion == "" { + t.Logf("No KAFKA_VERSION set. This tests requires Kafka version %s or higher. Continuing...", requiredVersion) + } else { + available := parseKafkaVersion(kafkaVersion) + required := parseKafkaVersion(requiredVersion) + if !available.satisfies(required) { + t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion) + } + } +} + +func resetProxies(t testing.TB) { + if err := proxyClient.ResetState(); err != nil { + t.Error(err) + } + Proxies = nil +} + +func fetchProxies(t testing.TB) { + var err error + Proxies, err = proxyClient.Proxies() + if err != nil { + t.Fatal(err) + } +} + +func SaveProxy(t *testing.T, px string) { + if err := Proxies[px].Save(); err != nil { + t.Fatal(err) + } +} + +func setupFunctionalTest(t testing.TB) { + checkKafkaAvailability(t) + resetProxies(t) + fetchProxies(t) +} + +func teardownFunctionalTest(t testing.TB) { + resetProxies(t) +} + +type kafkaVersion []int + +func (kv kafkaVersion) satisfies(other kafkaVersion) bool { + var ov int + for index, v := range kv { + if len(other) <= index { + ov = 0 + } else { + ov = other[index] + } + + if v < ov { + return false + } + } + return true +} + +func parseKafkaVersion(version string) kafkaVersion { + numbers := strings.Split(version, ".") + result := make(kafkaVersion, 0, len(numbers)) + for _, number := range numbers { + nr, _ := strconv.Atoi(number) + result = append(result, nr) + } + + return result +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go b/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go 
new file mode 100644 index 000000000..70078be5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go @@ -0,0 +1,29 @@ +package sarama + +import "encoding/binary" + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. +type lengthField struct { + startOffset int +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message.go new file mode 100644 index 000000000..49b19c5a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/message.go @@ -0,0 +1,154 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages. +type CompressionCodec int8 + +// only the last two bits are really used +const compressionCodecMask int8 = 0x03 + +const ( + CompressionNone CompressionCodec = 0 + CompressionGZIP CompressionCodec = 1 + CompressionSnappy CompressionCodec = 2 +) + +// The spec just says: "This is a version id used to allow backwards compatible evolution of the message +// binary format." but it doesn't say what the current value is, so presumably 0... 
+const messageFormat int8 = 0 + +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + + compressedCache []byte +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(&crc32Field{}) + + pe.putInt8(messageFormat) + + attributes := int8(m.Codec) & compressionCodecMask + pe.putInt8(attributes) + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else { + switch m.Codec { + case CompressionNone: + payload = m.Value + case CompressionGZIP: + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + case CompressionSnappy: + tmp := snappyEncode(m.Value) + m.compressedCache = tmp + payload = m.compressedCache + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + } + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + err = pd.push(&crc32Field{}) + if err != nil { + return err + } + + format, err := pd.getInt8() + if err != nil { + return err + } + if format != messageFormat { + return PacketDecodingError{"unexpected messageFormat"} + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + switch m.Codec { + case CompressionNone: + // nothing to do + case CompressionGZIP: + if m.Value == nil { + return 
PacketDecodingError{"GZIP compression specified, but no data to uncompress"} + } + reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + if err != nil { + return err + } + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + return m.decodeSet() + case CompressionSnappy: + if m.Value == nil { + return PacketDecodingError{"Snappy compression specified, but no data to uncompress"} + } + if m.Value, err = snappyDecode(m.Value); err != nil { + return err + } + return m.decodeSet() + default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} + } + + err = pd.pop() + if err != nil { + return err + } + + return nil +} + +// decodes a message set from a previousy encoded bulk-message +func (m *Message) decodeSet() (err error) { + pd := realDecoder{raw: m.Value} + m.Set = &MessageSet{} + return m.Set.decode(&pd) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go new file mode 100644 index 000000000..f028784e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go @@ -0,0 +1,89 @@ +package sarama + +type MessageBlock struct { + Offset int64 + Msg *Message +} + +// Messages convenience helper which returns either all the +// messages that are wrapped in this block +func (msb *MessageBlock) Messages() []*MessageBlock { + if msb.Msg.Set != nil { + return msb.Msg.Set.Messages + } + return []*MessageBlock{msb} +} + +func (msb *MessageBlock) encode(pe packetEncoder) error { + pe.putInt64(msb.Offset) + pe.push(&lengthField{}) + err := msb.Msg.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (msb *MessageBlock) decode(pd packetDecoder) (err error) { + if msb.Offset, err = pd.getInt64(); err != nil { + return err + } + + if err = pd.push(&lengthField{}); err != nil { + return err + } + + msb.Msg = new(Message) + if err = msb.Msg.decode(pd); err != nil { + return err + } + + if 
err = pd.pop(); err != nil { + return err + } + + return nil +} + +type MessageSet struct { + PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + Messages []*MessageBlock +} + +func (ms *MessageSet) encode(pe packetEncoder) error { + for i := range ms.Messages { + err := ms.Messages[i].encode(pe) + if err != nil { + return err + } + } + return nil +} + +func (ms *MessageSet) decode(pd packetDecoder) (err error) { + ms.Messages = nil + + for pd.remaining() > 0 { + msb := new(MessageBlock) + err = msb.decode(pd) + switch err { + case nil: + ms.Messages = append(ms.Messages, msb) + case ErrInsufficientData: + // As an optimization the server is allowed to return a partial message at the + // end of the message set. Clients should handle this case. So we just ignore such things. + ms.PartialTrailingMessage = true + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go new file mode 100644 index 000000000..1dae896fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go @@ -0,0 +1,113 @@ +package sarama + +import "testing" + +var ( + emptyMessage = []byte{ + 167, 236, 104, 3, // CRC + 0x00, // magic version byte + 0x00, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + 0xFF, 0xFF, 0xFF, 0xFF} // value + + emptyGzipMessage = []byte{ + 97, 79, 149, 90, //CRC + 0x00, // magic version byte + 0x01, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + // value + 0x00, 0x00, 0x00, 0x17, + 0x1f, 0x8b, + 0x08, + 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} + + emptyBulkSnappyMessage = []byte{ + 180, 47, 53, 209, //CRC + 0x00, // magic version byte + 0x02, // attribute flags + 
0xFF, 0xFF, 0xFF, 0xFF, // key + 0, 0, 0, 42, + 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic + 0, 0, 0, 1, // min version + 0, 0, 0, 1, // default version + 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0} + + emptyBulkGzipMessage = []byte{ + 139, 160, 63, 141, //CRC + 0x00, // magic version byte + 0x01, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + 0x00, 0x00, 0x00, 0x27, // len + 0x1f, 0x8b, // Gzip Magic + 0x08, // deflate compressed + 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0} +) + +func TestMessageEncoding(t *testing.T) { + message := Message{} + testEncodable(t, "empty", &message, emptyMessage) + + message.Value = []byte{} + message.Codec = CompressionGZIP + testEncodable(t, "empty gzip", &message, emptyGzipMessage) +} + +func TestMessageDecoding(t *testing.T) { + message := Message{} + testDecodable(t, "empty", &message, emptyMessage) + if message.Codec != CompressionNone { + t.Error("Decoding produced compression codec where there was none.") + } + if message.Key != nil { + t.Error("Decoding produced key where there was none.") + } + if message.Value != nil { + t.Error("Decoding produced value where there was none.") + } + if message.Set != nil { + t.Error("Decoding produced set where there was none.") + } + + testDecodable(t, "empty gzip", &message, emptyGzipMessage) + if message.Codec != CompressionGZIP { + t.Error("Decoding produced incorrect compression codec (was gzip).") + } + if message.Key != nil { + t.Error("Decoding produced key where there was none.") + } + if message.Value == nil || len(message.Value) != 0 { + t.Error("Decoding produced nil or content-ful value where there was an empty array.") + } +} + +func TestMessageDecodingBulkSnappy(t *testing.T) { + message := Message{} + testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) + if message.Codec != CompressionSnappy { + 
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) + } + if message.Key != nil { + t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) + } + if message.Set == nil { + t.Error("Decoding produced no set, but one was expected.") + } else if len(message.Set.Messages) != 2 { + t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) + } +} + +func TestMessageDecodingBulkGzip(t *testing.T) { + message := Message{} + testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) + if message.Codec != CompressionGZIP { + t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) + } + if message.Key != nil { + t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) + } + if message.Set == nil { + t.Error("Decoding produced no set, but one was expected.") + } else if len(message.Set.Messages) != 2 { + t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go new file mode 100644 index 000000000..130cfd4f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go @@ -0,0 +1,48 @@ +package sarama + +type MetadataRequest struct { + Topics []string +} + +func (mr *MetadataRequest) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(mr.Topics)) + if err != nil { + return err + } + + for i := range mr.Topics { + err = pe.putString(mr.Topics[i]) + if err != nil { + return err + } + } + return nil +} + +func (mr *MetadataRequest) decode(pd packetDecoder) error { + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + mr.Topics = make([]string, topicCount) + for i := range mr.Topics { + topic, err := pd.getString() + if err != 
nil { + return err + } + mr.Topics[i] = topic + } + return nil +} + +func (mr *MetadataRequest) key() int16 { + return 3 +} + +func (mr *MetadataRequest) version() int16 { + return 0 +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go new file mode 100644 index 000000000..44f3146e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go @@ -0,0 +1,29 @@ +package sarama + +import "testing" + +var ( + metadataRequestNoTopics = []byte{ + 0x00, 0x00, 0x00, 0x00} + + metadataRequestOneTopic = []byte{ + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'} + + metadataRequestThreeTopics = []byte{ + 0x00, 0x00, 0x00, 0x03, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x03, 'b', 'a', 'z'} +) + +func TestMetadataRequest(t *testing.T) { + request := new(MetadataRequest) + testRequest(t, "no topics", request, metadataRequestNoTopics) + + request.Topics = []string{"topic1"} + testRequest(t, "one topic", request, metadataRequestOneTopic) + + request.Topics = []string{"foo", "bar", "baz"} + testRequest(t, "three topics", request, metadataRequestThreeTopics) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go new file mode 100644 index 000000000..b82221f7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go @@ -0,0 +1,227 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = 
pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + return nil +} + +type TopicMetadata struct { + Err KError + Name string + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + Brokers []*Broker + Topics []*TopicMetadata +} + +func (m *MetadataResponse) decode(pd packetDecoder) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + m.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + m.Brokers[i] = new(Broker) + err = m.Brokers[i].decode(pd) + if err != nil { + return err + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + m.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + m.Topics[i] = new(TopicMetadata) + err = m.Topics[i].decode(pd) + 
if err != nil { + return err + } + } + + return nil +} + +func (m *MetadataResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(m.Brokers)) + if err != nil { + return err + } + for _, broker := range m.Brokers { + err = broker.encode(pe) + if err != nil { + return err + } + } + + err = pe.putArrayLength(len(m.Topics)) + if err != nil { + return err + } + for _, tm := range m.Topics { + err = tm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +// testing API + +func (m *MetadataResponse) AddBroker(addr string, id int32) { + m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr}) +} + +func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range m.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + m.Topics = append(m.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { + tmatch := m.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.Err = err + +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go new file mode 100644 index 000000000..1f1a51549 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go @@ -0,0 +1,139 @@ +package sarama + +import "testing" + +var ( + emptyMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + 
brokersNoTopicsMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, 0xab, 0xff, + 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', + 0x00, 0x00, 0x00, 0x33, + + 0x00, 0x01, 0x02, 0x03, + 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', + 0x00, 0x00, 0x01, 0x11, + + 0x00, 0x00, 0x00, 0x00} + + topicsNoBrokersMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x04, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x07, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x00} +) + +func TestEmptyMetadataResponse(t *testing.T) { + response := MetadataResponse{} + + testDecodable(t, "empty", &response, emptyMetadataResponse) + if len(response.Brokers) != 0 { + t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") + } + if len(response.Topics) != 0 { + t.Error("Decoding produced", len(response.Topics), "topics where there were none!") + } +} + +func TestMetadataResponseWithBrokers(t *testing.T) { + response := MetadataResponse{} + + testDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse) + if len(response.Brokers) != 2 { + t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") + } + + if response.Brokers[0].id != 0xabff { + t.Error("Decoding produced invalid broker 0 id.") + } + if response.Brokers[0].addr != "localhost:51" { + t.Error("Decoding produced invalid broker 0 address.") + } + if response.Brokers[1].id != 0x010203 { + t.Error("Decoding produced invalid broker 1 id.") + } + if response.Brokers[1].addr != "google.com:273" { + t.Error("Decoding produced invalid broker 1 address.") + } + + if len(response.Topics) != 0 { + t.Error("Decoding produced", len(response.Topics), "topics where there were 
none!") + } +} + +func TestMetadataResponseWithTopics(t *testing.T) { + response := MetadataResponse{} + + testDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse) + if len(response.Brokers) != 0 { + t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") + } + + if len(response.Topics) != 2 { + t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") + } + + if response.Topics[0].Err != ErrNoError { + t.Error("Decoding produced invalid topic 0 error.") + } + + if response.Topics[0].Name != "foo" { + t.Error("Decoding produced invalid topic 0 name.") + } + + if len(response.Topics[0].Partitions) != 1 { + t.Fatal("Decoding produced invalid partition count for topic 0.") + } + + if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize { + t.Error("Decoding produced invalid topic 0 partition 0 error.") + } + + if response.Topics[0].Partitions[0].ID != 0x01 { + t.Error("Decoding produced invalid topic 0 partition 0 id.") + } + + if response.Topics[0].Partitions[0].Leader != 0x07 { + t.Error("Decoding produced invalid topic 0 partition 0 leader.") + } + + if len(response.Topics[0].Partitions[0].Replicas) != 3 { + t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") + } + for i := 0; i < 3; i++ { + if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { + t.Error("Decoding produced invalid topic 0 partition 0 replica", i) + } + } + + if len(response.Topics[0].Partitions[0].Isr) != 0 { + t.Error("Decoding produced invalid topic 0 partition 0 isr length.") + } + + if response.Topics[1].Err != ErrNoError { + t.Error("Decoding produced invalid topic 1 error.") + } + + if response.Topics[1].Name != "bar" { + t.Error("Decoding produced invalid topic 0 name.") + } + + if len(response.Topics[1].Partitions) != 0 { + t.Error("Decoding produced invalid partition count for topic 1.") + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go 
b/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go new file mode 100644 index 000000000..987697380 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go @@ -0,0 +1,273 @@ +package sarama + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "reflect" + "strconv" + "sync" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" +) + +const ( + expectationTimeout = 500 * time.Millisecond +) + +type requestHandlerFunc func(req *request) (res encoder) + +// mockBroker is a mock Kafka broker. It consists of a TCP server on a +// kernel-selected localhost port that can accept many connections. It reads +// Kafka requests from that connection and passes them to the user specified +// handler function (see SetHandler) that generates respective responses. If +// the handler has not been explicitly specified then the broker returns +// responses set by the Returns function in the exact order they were provided. +// (if a response has a len of 0, nothing is sent, and the client request will +// timeout in this case). +// +// When running tests with one of these, it is strongly recommended to specify +// a timeout to `go test` so that if the broker hangs waiting for a response, +// the test panics. +// +// It is not necessary to prefix message length or correlation ID to your +// response bytes, the server does that automatically as a convenience. +type mockBroker struct { + brokerID int32 + port int32 + closing chan none + stopper chan none + expectations chan encoder + listener net.Listener + t *testing.T + latency time.Duration + handler requestHandlerFunc + history []RequestResponse + lock sync.Mutex +} + +type RequestResponse struct { + Request requestBody + Response encoder +} + +func (b *mockBroker) SetLatency(latency time.Duration) { + b.latency = latency +} + +// SetHandler sets the specified function as the request handler. 
Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *mockBroker) SetHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *mockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { + b.SetHandler(func(req *request) (res encoder) { + reqTypeName := reflect.TypeOf(req.body).Elem().Name() + mockResponse := handlerMap[reqTypeName] + if mockResponse == nil { + return nil + } + return mockResponse.For(req.body) + }) +} + +func (b *mockBroker) BrokerID() int32 { + return b.brokerID +} + +func (b *mockBroker) History() []RequestResponse { + b.lock.Lock() + history := make([]RequestResponse, len(b.history)) + copy(history, b.history) + b.lock.Unlock() + return history +} + +func (b *mockBroker) Port() int32 { + return b.port +} + +func (b *mockBroker) Addr() string { + return b.listener.Addr().String() +} + +func (b *mockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! 
Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +func (b *mockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + safeClose(b.t, b.listener) + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *mockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { + defer wg.Done() + defer func() { + _ = conn.Close() + }() + Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) + var err error + + abort := make(chan none) + defer close(abort) + go func() { + select { + case <-b.closing: + _ = conn.Close() + case <-abort: + } + }() + + resHeader := make([]byte, 8) + for { + req, err := decodeRequest(conn) + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } + + if b.latency > 0 { + time.Sleep(b.latency) + } + + b.lock.Lock() + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) + b.lock.Unlock() + + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + continue + } + + binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) + binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = 
conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + } + Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) +} + +func (b *mockBroker) defaultRequestHandler(req *request) (res encoder) { + select { + case res, ok := <-b.expectations: + if !ok { + return nil + } + return res + case <-time.After(expectationTimeout): + return nil + } +} + +func (b *mockBroker) serverError(err error) { + isConnectionClosedError := false + if _, ok := err.(*net.OpError); ok { + isConnectionClosedError = true + } else if err == io.EOF { + isConnectionClosedError = true + } else if err.Error() == "use of closed network connection" { + isConnectionClosedError = true + } + + if isConnectionClosedError { + return + } + + b.t.Errorf(err.Error()) +} + +// newMockBroker launches a fake Kafka broker. It takes a *testing.T as provided by the +// test framework and a channel of responses to use. If an error occurs it is +// simply logged to the *testing.T and the broker exits. +func newMockBroker(t *testing.T, brokerID int32) *mockBroker { + return newMockBrokerAddr(t, brokerID, "localhost:0") +} + +// newMockBrokerAddr behaves like newMockBroker but listens on the address you give +// it rather than just some ephemeral port. 
+func newMockBrokerAddr(t *testing.T, brokerID int32, addr string) *mockBroker { + var err error + + broker := &mockBroker{ + closing: make(chan none), + stopper: make(chan none), + t: t, + brokerID: brokerID, + expectations: make(chan encoder, 512), + } + broker.handler = broker.defaultRequestHandler + + broker.listener, err = net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) + _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + tmp, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + t.Fatal(err) + } + broker.port = int32(tmp) + + go broker.serverLoop() + + return broker +} + +func (b *mockBroker) Returns(e encoder) { + b.expectations <- e +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go new file mode 100644 index 000000000..655d9fb3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go @@ -0,0 +1,411 @@ +package sarama + +import ( + "testing" +) + +// MockResponse is a response builder interface it defines one method that +// allows generating a response based on a request body. +type MockResponse interface { + For(reqBody decoder) (res encoder) +} + +type mockWrapper struct { + res encoder +} + +func (mw *mockWrapper) For(reqBody decoder) (res encoder) { + return mw.res +} + +func newMockWrapper(res encoder) *mockWrapper { + return &mockWrapper{res: res} +} + +// mockMetadataResponse is a `MetadataResponse` builder. 
+type mockMetadataResponse struct { + leaders map[string]map[int32]int32 + brokers map[string]int32 + t *testing.T +} + +func newMockMetadataResponse(t *testing.T) *mockMetadataResponse { + return &mockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *mockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *mockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *mockMetadataResponse) SetBroker(addr string, brokerID int32) *mockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mor *mockMetadataResponse) For(reqBody decoder) encoder { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{} + for addr, brokerID := range mor.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mor.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mor.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse +} + +// mockOffsetResponse is an `OffsetResponse` builder. 
+type mockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t *testing.T +} + +func newMockOffsetResponse(t *testing.T) *mockOffsetResponse { + return &mockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *mockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *mockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *mockOffsetResponse) For(reqBody decoder) encoder { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *mockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// mockFetchResponse is a `FetchResponse` builder. 
+type mockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t *testing.T + batchSize int +} + +func newMockFetchResponse(t *testing.T, batchSize int) *mockFetchResponse { + return &mockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *mockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *mockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *mockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *mockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *mockFetchResponse) For(reqBody decoder) encoder { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{} + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} + +func (mfr 
*mockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *mockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *mockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// mockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type mockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t *testing.T +} + +func newMockConsumerMetadataResponse(t *testing.T) *mockConsumerMetadataResponse { + return &mockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *mockConsumerMetadataResponse) SetCoordinator(group string, broker *mockBroker) *mockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *mockConsumerMetadataResponse) SetError(group string, kerror KError) *mockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *mockConsumerMetadataResponse) For(reqBody decoder) encoder { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *mockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// mockOffsetCommitResponse is a `OffsetCommitResponse` builder. 
+type mockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t *testing.T +} + +func newMockOffsetCommitResponse(t *testing.T) *mockOffsetCommitResponse { + return &mockOffsetCommitResponse{t: t} +} + +func (mr *mockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *mockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *mockOffsetCommitResponse) For(reqBody decoder) encoder { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *mockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// mockProduceResponse is a `ProduceResponse` builder. 
+type mockProduceResponse struct { + errors map[string]map[int32]KError + t *testing.T +} + +func newMockProduceResponse(t *testing.T) *mockProduceResponse { + return &mockProduceResponse{t: t} +} + +func (mr *mockProduceResponse) SetError(topic string, partition int32, kerror KError) *mockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *mockProduceResponse) For(reqBody decoder) encoder { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{} + for topic, partitions := range req.msgSets { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *mockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// mockOffsetFetchResponse is a `OffsetFetchResponse` builder. 
+type mockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + t *testing.T +} + +func newMockOffsetFetchResponse(t *testing.T) *mockOffsetFetchResponse { + return &mockOffsetFetchResponse{t: t} +} + +func (mr *mockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *mockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + return mr +} + +func (mr *mockOffsetFetchResponse) For(reqBody decoder) encoder { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{} + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + return res +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md new file mode 100644 index 000000000..55a6c2e61 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md @@ -0,0 +1,13 @@ +# sarama/mocks + +The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. +You can use them to test your sarama applications using dependency injection. + +The following mock objects are available: + +- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks. 
+- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer) +- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer) + +The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, +and the results will be reported to the `*testing.T` object you provided when creating the mock. diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go new file mode 100644 index 000000000..6ccf1f145 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go @@ -0,0 +1,142 @@ +package mocks + +import ( + "sync" + + "github.com/Shopify/sarama" +) + +// AsyncProducer implements sarama's Producer interface for testing purposes. +// Before you can send messages to it's Input channel, you have to set expectations +// so it knows how to handle the input. This way you can easily test success and +// failure scenarios. +type AsyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + closed chan struct{} + input chan *sarama.ProducerMessage + successes chan *sarama.ProducerMessage + errors chan *sarama.ProducerError + lastOffset int64 +} + +// NewAsyncProducer instantiates a new Producer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is used to determine whether it +// should ack successes on the Successes channel. 
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { + if config == nil { + config = sarama.NewConfig() + } + mp := &AsyncProducer{ + t: t, + closed: make(chan struct{}, 0), + expectations: make([]*producerExpectation, 0), + input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), + } + + go func() { + defer func() { + close(mp.successes) + close(mp.errors) + }() + + for msg := range mp.input { + mp.l.Lock() + if mp.expectations == nil || len(mp.expectations) == 0 { + mp.expectations = nil + mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + } else { + expectation := mp.expectations[0] + mp.expectations = mp.expectations[1:] + if expectation.Result == errProduceSuccess { + mp.lastOffset++ + if config.Producer.Return.Successes { + msg.Offset = mp.lastOffset + mp.successes <- msg + } + } else { + if config.Producer.Return.Errors { + mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} + } + } + } + mp.l.Unlock() + } + + mp.l.Lock() + if len(mp.expectations) > 0 { + mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) + } + mp.l.Unlock() + + close(mp.closed) + }() + + return mp +} + +//////////////////////////////////////////////// +// Implement Producer interface +//////////////////////////////////////////////// + +// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) AsyncClose() { + close(mp.input) +} + +// Close corresponds with the Close method of sarama's Producer implementation. 
+// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) Close() error { + mp.AsyncClose() + <-mp.closed + return nil +} + +// Input corresponds with the Input method of sarama's Producer implementation. +// You have to set expectations on the mock producer before writing messages to the Input +// channel, so it knows how to handle them. If there is no more remaining expectations and +// a messages is written to the Input channel, the mock producer will write an error to the test +// state object. +func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { + return mp.input +} + +// Successes corresponds with the Successes method of sarama's Producer implementation. +func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { + return mp.successes +} + +// Errors corresponds with the Errors method of sarama's Producer implementation. +func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { + return mp.errors +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it is produced successfully, +// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting +// is set to true. +func (mp *AsyncProducer) ExpectInputAndSucceed() { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess}) +} + +// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it failed to produce +// successfully. 
This means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputAndFail(err error) { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: err}) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go new file mode 100644 index 000000000..520bf58b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go @@ -0,0 +1,94 @@ +package mocks + +import ( + "fmt" + "testing" + + "github.com/Shopify/sarama" +) + +type testReporterMock struct { + errors []string +} + +func newTestReporterMock() *testReporterMock { + return &testReporterMock{errors: make([]string, 0)} +} + +func (trm *testReporterMock) Errorf(format string, args ...interface{}) { + trm.errors = append(trm.errors, fmt.Sprintf(format, args...)) +} + +func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) { + var mp interface{} = &AsyncProducer{} + if _, ok := mp.(sarama.AsyncProducer); !ok { + t.Error("The mock producer should implement the sarama.Producer interface.") + } +} + +func TestProducerReturnsExpectationsToChannels(t *testing.T) { + config := sarama.NewConfig() + config.Producer.Return.Successes = true + mp := NewAsyncProducer(t, config) + + mp.ExpectInputAndSucceed() + mp.ExpectInputAndSucceed() + mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) + + mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"} + mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"} + mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"} + + msg1 := <-mp.Successes() + msg2 := <-mp.Successes() + err1 := <-mp.Errors() + + if msg1.Topic != "test 1" { + t.Error("Expected message 1 to be returned first") + } + + if msg2.Topic != "test 2" { + t.Error("Expected message 2 to be returned second") + } + + if err1.Msg.Topic != "test 3" || err1.Err != 
sarama.ErrOutOfBrokers { + t.Error("Expected message 3 to be returned as error") + } + + if err := mp.Close(); err != nil { + t.Error(err) + } +} + +func TestProducerWithTooFewExpectations(t *testing.T) { + trm := newTestReporterMock() + mp := NewAsyncProducer(trm, nil) + mp.ExpectInputAndSucceed() + + mp.Input() <- &sarama.ProducerMessage{Topic: "test"} + mp.Input() <- &sarama.ProducerMessage{Topic: "test"} + + if err := mp.Close(); err != nil { + t.Error(err) + } + + if len(trm.errors) != 1 { + t.Error("Expected to report an error") + } +} + +func TestProducerWithTooManyExpectations(t *testing.T) { + trm := newTestReporterMock() + mp := NewAsyncProducer(trm, nil) + mp.ExpectInputAndSucceed() + mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) + + mp.Input() <- &sarama.ProducerMessage{Topic: "test"} + if err := mp.Close(); err != nil { + t.Error(err) + } + + if len(trm.errors) != 1 { + t.Error("Expected to report an error") + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go new file mode 100644 index 000000000..acf0894ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go @@ -0,0 +1,316 @@ +package mocks + +import ( + "sync" + "sync/atomic" + + "github.com/Shopify/sarama" +) + +// Consumer implements sarama's Consumer interface for testing purposes. +// Before you can start consuming from this consumer, you have to register +// topic/partitions using ExpectConsumePartition, and set expectations on them. +type Consumer struct { + l sync.Mutex + t ErrorReporter + config *sarama.Config + partitionConsumers map[string]map[int32]*PartitionConsumer + metadata map[string][]int32 +} + +// NewConsumer returns a new mock Consumer instance. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is currently unused and can be set to nil. 
+func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { + if config == nil { + config = sarama.NewConfig() + } + + c := &Consumer{ + t: t, + config: config, + partitionConsumers: make(map[string]map[int32]*PartitionConsumer), + } + return c +} + +/////////////////////////////////////////////////// +// Consumer interface implementation +/////////////////////////////////////////////////// + +// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. +// Before you can start consuming a partition, you have to set expectations on it using +// ExpectConsumePartition. You can only consume a partition once per consumer. +func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { + c.t.Errorf("No expectations set for %s/%d", topic, partition) + return nil, errOutOfExpectations + } + + pc := c.partitionConsumers[topic][partition] + if pc.consumed { + return nil, sarama.ConfigurationError("The topic/partition is already being consumed") + } + + if pc.offset != AnyOffset && pc.offset != offset { + c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset) + } + + pc.consumed = true + go pc.handleExpectations() + return pc, nil +} + +// Topics returns a list of topics, as registered with SetMetadata +func (c *Consumer) Topics() ([]string, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Topics. 
Initialize the mock's topic metadata with SetMetadata.") + return nil, sarama.ErrOutOfBrokers + } + + var result []string + for topic, _ := range c.metadata { + result = append(result, topic) + } + return result, nil +} + +// Partitions returns the list of parititons for the given topic, as registered with SetMetadata +func (c *Consumer) Partitions(topic string) ([]int32, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.") + return nil, sarama.ErrOutOfBrokers + } + if c.metadata[topic] == nil { + return nil, sarama.ErrUnknownTopicOrPartition + } + + return c.metadata[topic], nil +} + +// Close implements the Close method from the sarama.Consumer interface. It will close +// all registered PartitionConsumer instances. +func (c *Consumer) Close() error { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + _ = partitionConsumer.Close() + } + } + + return nil +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// SetMetadata sets the clusters topic/partition metadata, +// which will be returned by Topics() and Partitions(). +func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + c.metadata = metadata +} + +// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. +// The registered PartitionConsumer will be returned, so you can set expectations +// on it using method chanining. Once a topic/partition is registered, you are +// expected to start consuming it using ConsumePartition. If that doesn't happen, +// an error will be written to the error reporter once the mock consumer is closed. 
It will +// also expect that the +func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil { + c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) + } + + if c.partitionConsumers[topic][partition] == nil { + c.partitionConsumers[topic][partition] = &PartitionConsumer{ + t: c.t, + topic: topic, + partition: partition, + offset: offset, + expectations: make(chan *consumerExpectation, 1000), + messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), + } + } + + return c.partitionConsumers[topic][partition] +} + +/////////////////////////////////////////////////// +// PartitionConsumer mock type +/////////////////////////////////////////////////// + +// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. +// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is +// registered first using the Consumer's ExpectConsumePartition method. Before consuming the +// Errors and Messages channel, you should specify what values will be provided on these +// channels using YieldMessage and YieldError. 
type PartitionConsumer struct {
	l                       sync.Mutex
	t                       ErrorReporter
	topic                   string
	partition               int32
	offset                  int64 // expected offset as registered; AnyOffset acts as a wildcard
	expectations            chan *consumerExpectation
	messages                chan *sarama.ConsumerMessage
	errors                  chan *sarama.ConsumerError
	singleClose             sync.Once // guards closing the expectations channel exactly once
	consumed                bool      // set when ConsumePartition has been called for this registration
	errorsShouldBeDrained   bool
	messagesShouldBeDrained bool
	highWaterMarkOffset     int64 // incremented per yielded message; accessed atomically
}

// handleExpectations pumps queued expectations onto the messages and errors
// channels. It runs in its own goroutine (started by Consumer.ConsumePartition),
// exits when the expectations channel is closed by AsyncClose/Close, and then
// closes the messages and errors channels.
//
// NOTE(review): the mutex is acquired for the entire lifetime of this goroutine.
func (pc *PartitionConsumer) handleExpectations() {
	pc.l.Lock()
	defer pc.l.Unlock()

	for ex := range pc.expectations {
		if ex.Err != nil {
			pc.errors <- &sarama.ConsumerError{
				Topic:     pc.topic,
				Partition: pc.partition,
				Err:       ex.Err,
			}
		} else {
			// Each yielded message gets the next offset; the high water mark is
			// updated atomically so HighWaterMarkOffset can read it concurrently.
			atomic.AddInt64(&pc.highWaterMarkOffset, 1)

			ex.Msg.Topic = pc.topic
			ex.Msg.Partition = pc.partition
			ex.Msg.Offset = atomic.LoadInt64(&pc.highWaterMarkOffset)

			pc.messages <- ex.Msg
		}
	}

	close(pc.messages)
	close(pc.errors)
}

///////////////////////////////////////////////////
// PartitionConsumer interface implementation
///////////////////////////////////////////////////

// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) AsyncClose() {
	// sync.Once makes it safe to call both AsyncClose and Close.
	pc.singleClose.Do(func() {
		close(pc.expectations)
	})
}

// Close implements the Close method from the sarama.PartitionConsumer interface. It will
// verify whether the partition consumer was actually started.
+func (pc *PartitionConsumer) Close() error { + if !pc.consumed { + pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition) + return errPartitionConsumerNotStarted + } + + if pc.errorsShouldBeDrained && len(pc.errors) > 0 { + pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors)) + } + + if pc.messagesShouldBeDrained && len(pc.messages) > 0 { + pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages)) + } + + pc.AsyncClose() + + var ( + closeErr error + wg sync.WaitGroup + ) + + wg.Add(1) + go func() { + defer wg.Done() + + var errs = make(sarama.ConsumerErrors, 0) + for err := range pc.errors { + errs = append(errs, err) + } + + if len(errs) > 0 { + closeErr = errs + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for _ = range pc.messages { + // drain + } + }() + + wg.Wait() + return closeErr +} + +// Errors implements the Errors method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError { + return pc.errors +} + +// Messages implements the Messages method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage { + return pc.messages +} + +func (pc *PartitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1 +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// YieldMessage will yield a messages Messages channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this +// message was consumed from the Messages channel, because there are legitimate +// reasons forthis not to happen. 
ou can call ExpectMessagesDrainedOnClose so it will +// verify that the channel is empty on close. +func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) { + pc.expectations <- &consumerExpectation{Msg: msg} +} + +// YieldError will yield an error on the Errors channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this error was +// consumed from the Errors channel, because there are legitimate reasons for this +// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that +// the channel is empty on close. +func (pc *PartitionConsumer) YieldError(err error) { + pc.expectations <- &consumerExpectation{Err: err} +} + +// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer +// that the messages channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() { + pc.messagesShouldBeDrained = true +} + +// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer +// that the errors channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. 
+func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() { + pc.errorsShouldBeDrained = true +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go new file mode 100644 index 000000000..50dad3a69 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go @@ -0,0 +1,249 @@ +package mocks + +import ( + "sort" + "testing" + + "github.com/Shopify/sarama" +) + +func TestMockConsumerImplementsConsumerInterface(t *testing.T) { + var c interface{} = &Consumer{} + if _, ok := c.(sarama.Consumer); !ok { + t.Error("The mock consumer should implement the sarama.Consumer interface.") + } + + var pc interface{} = &PartitionConsumer{} + if _, ok := pc.(sarama.PartitionConsumer); !ok { + t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.") + } +} + +func TestConsumerHandlesExpectations(t *testing.T) { + consumer := NewConsumer(t, nil) + defer func() { + if err := consumer.Close(); err != nil { + t.Error(err) + } + }() + + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) + consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")}) + consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")}) + + pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) + if err != nil { + t.Fatal(err) + } + test0_msg := <-pc_test0.Messages() + if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" { + t.Error("Message was not as expected:", test0_msg) + } + test0_err := <-pc_test0.Errors() + if test0_err.Err != sarama.ErrOutOfBrokers { + 
t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err) + } + + pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) + if err != nil { + t.Fatal(err) + } + test1_msg := <-pc_test1.Messages() + if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" { + t.Error("Message was not as expected:", test1_msg) + } + + pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest) + if err != nil { + t.Fatal(err) + } + other0_msg := <-pc_other0.Messages() + if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" { + t.Error("Message was not as expected:", other0_msg) + } +} + +func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) { + consumer := NewConsumer(t, nil) + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) + + pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) + if err != nil { + t.Fatal(err) + } + + select { + case <-pc.Messages(): + t.Error("Did not epxect a message on the messages channel.") + case err := <-pc.Errors(): + if err.Err != sarama.ErrOutOfBrokers { + t.Error("Expected sarama.ErrOutOfBrokers, found", err) + } + } + + errs := pc.Close().(sarama.ConsumerErrors) + if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers { + t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers") + } +} + +func TestConsumerWithoutExpectationsOnPartition(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + + _, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) + if err != errOutOfExpectations { + t.Error("Expected ConsumePartition to return errOutOfExpectations") + } + + if err := consumer.Close(); err != nil { + t.Error("No error expected on close, but found:", err) + } + + if 
len(trm.errors) != 1 { + t.Errorf("Expected an expectation failure to be set on the error reporter.") + } +} + +func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) + + if err := consumer.Close(); err != nil { + t.Error("No error expected on close, but found:", err) + } + + if len(trm.errors) != 1 { + t.Errorf("Expected an expectation failure to be set on the error reporter.") + } +} + +func TestConsumerWithWrongOffsetExpectation(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) + + _, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest) + if err != nil { + t.Error("Did not expect error, found:", err) + } + + if len(trm.errors) != 1 { + t.Errorf("Expected an expectation failure to be set on the error reporter.") + } + + if err := consumer.Close(); err != nil { + t.Error(err) + } +} + +func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) + pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) + pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) + pcmock.ExpectMessagesDrainedOnClose() + + pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) + if err != nil { + t.Error(err) + } + + // consume first message, not second one + <-pc.Messages() + + if err := consumer.Close(); err != nil { + t.Error(err) + } + + if len(trm.errors) != 1 { + t.Errorf("Expected an expectation failure to be set on the error reporter.") + } +} + +func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, 
nil) + + pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) + pcmock.YieldError(sarama.ErrInvalidMessage) + pcmock.YieldError(sarama.ErrInvalidMessage) + pcmock.ExpectErrorsDrainedOnClose() + + pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) + if err != nil { + t.Error(err) + } + + // consume first and second error, + <-pc.Errors() + <-pc.Errors() + + if err := consumer.Close(); err != nil { + t.Error(err) + } + + if len(trm.errors) != 0 { + t.Errorf("Expected no expectation failures to be set on the error reporter.") + } +} + +func TestConsumerTopicMetadata(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + + consumer.SetTopicMetadata(map[string][]int32{ + "test1": []int32{0, 1, 2, 3}, + "test2": []int32{0, 1, 2, 3, 4, 5, 6, 7}, + }) + + topics, err := consumer.Topics() + if err != nil { + t.Error(t) + } + + sortedTopics := sort.StringSlice(topics) + sortedTopics.Sort() + if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" { + t.Error("Unexpected topics returned:", sortedTopics) + } + + partitions1, err := consumer.Partitions("test1") + if err != nil { + t.Error(t) + } + + if len(partitions1) != 4 { + t.Error("Unexpected partitions returned:", len(partitions1)) + } + + partitions2, err := consumer.Partitions("test2") + if err != nil { + t.Error(t) + } + + if len(partitions2) != 8 { + t.Error("Unexpected partitions returned:", len(partitions2)) + } + + if len(trm.errors) != 0 { + t.Errorf("Expected no expectation failures to be set on the error reporter.") + } +} + +func TestConsumerUnexpectedTopicMetadata(t *testing.T) { + trm := newTestReporterMock() + consumer := NewConsumer(trm, nil) + + if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers { + t.Error("Expected sarama.ErrOutOfBrokers, found", err) + } + + if len(trm.errors) != 1 { + t.Errorf("Expected an expectation failure to be set on the error reporter.") + } +} diff --git 
a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go new file mode 100644 index 000000000..ab24beebd --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go @@ -0,0 +1,43 @@ +/* +Package mocks provides mocks that can be used for testing applications +that use Sarama. The mock types provided by this package implement the +interfaces Sarama exports, so you can use them for dependency injection +in your tests. + +All mock instances require you to set expectations on them before you +can use them. It will determine how the mock will behave. If an +expectation is not met, it will make your test fail. + +NOTE: this package currently does not fall under the API stability +guarantee of Sarama as it is still considered experimental. +*/ +package mocks + +import ( + "errors" + + "github.com/Shopify/sarama" +) + +// A simple interface that includes the testing.T methods we use to report +// expectation violations when using the mock objects. +type ErrorReporter interface { + Errorf(string, ...interface{}) +} + +var ( + errProduceSuccess error = nil + errOutOfExpectations = errors.New("No more expectations set on mock") + errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") +) + +const AnyOffset int64 = -1000 + +type producerExpectation struct { + Result error +} + +type consumerExpectation struct { + Err error + Msg *sarama.ConsumerMessage +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go new file mode 100644 index 000000000..be59ecdb4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go @@ -0,0 +1,93 @@ +package mocks + +import ( + "github.com/Shopify/sarama" + "sync" +) + +// SyncProducer implements sarama's SyncProducer interface for testing purposes. 
+// Before you can use it, you have to set expectations on the mock SyncProducer +// to tell it how to handle calls to SendMessage, so you can easily test success +// and failure scenarios. +type SyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + lastOffset int64 +} + +// NewSyncProducer instantiates a new SyncProducer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is currently unused, but is +// maintained to be compatible with the async Producer. +func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { + return &SyncProducer{ + t: t, + expectations: make([]*producerExpectation, 0), + } +} + +//////////////////////////////////////////////// +// Implement SyncProducer interface +//////////////////////////////////////////////// + +// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessage, so it knows +// how to handle them. If there is no more remaining expectations when SendMessage is called, +// the mock producer will write an error to the test state object. +func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) > 0 { + expectation := sp.expectations[0] + sp.expectations = sp.expectations[1:] + + if expectation.Result == errProduceSuccess { + sp.lastOffset++ + msg.Offset = sp.lastOffset + return 0, msg.Offset, nil + } else { + return -1, -1, expectation.Result + } + } else { + sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + return -1, -1, errOutOfExpectations + } +} + +// Close corresponds with the Close method of sarama's SyncProducer implementation. 
+// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, +// so it will write an error to the test state if there's any remaining expectations. +func (sp *SyncProducer) Close() error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) > 0 { + sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) + } + + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it produced successfully, i.e. by +// returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageAndSucceed() { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess}) +} + +// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it failed to produce +// successfully, i.e. by returning the provided error. 
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
	sp.l.Lock()
	defer sp.l.Unlock()
	sp.expectations = append(sp.expectations, &producerExpectation{Result: err})
}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go
new file mode 100644
index 000000000..a674138e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go
@@ -0,0 +1,98 @@
package mocks

import (
	"testing"

	"github.com/Shopify/sarama"
)

// Runtime check that the mock satisfies sarama.SyncProducer.
func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
	var mp interface{} = &SyncProducer{}
	if _, ok := mp.(sarama.SyncProducer); !ok {
		t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
	}
}

// Offsets are assigned sequentially (1, 2, ...) for successful sends, and the
// configured failure error is returned verbatim.
func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
	sp := NewSyncProducer(t, nil)
	defer func() {
		if err := sp.Close(); err != nil {
			t.Error(err)
		}
	}()

	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}

	_, offset, err := sp.SendMessage(msg)
	if err != nil {
		t.Errorf("The first message should have been produced successfully, but got %s", err)
	}
	if offset != 1 || offset != msg.Offset {
		t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
	}

	_, offset, err = sp.SendMessage(msg)
	if err != nil {
		t.Errorf("The second message should have been produced successfully, but got %s", err)
	}
	if offset != 2 || offset != msg.Offset {
		t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
	}

	_, _, err = sp.SendMessage(msg)
	if err != sarama.ErrOutOfBrokers {
		t.Errorf("The third message should not have been produced successfully")
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}
}

// A send beyond the set expectations must be reported to the error reporter.
func TestSyncProducerWithTooManyExpectations(t *testing.T) {
	trm := newTestReporterMock()

	sp := NewSyncProducer(trm, nil)
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error("No error expected on first SendMessage call", err)
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}

// Unused expectations left at Close must also be reported.
func TestSyncProducerWithTooFewExpectations(t *testing.T) {
	trm := newTestReporterMock()

	sp := NewSyncProducer(trm, nil)
	sp.ExpectSendMessageAndSucceed()

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error("No error expected on first SendMessage call", err)
	}
	if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
		t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 000000000..ba4ac76aa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,172 @@
package sarama

// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
const ReceiveTime int64 = -1

// offsetCommitRequestBlock holds the per-partition payload of an offset commit:
// the offset itself, an optional timestamp (v1 only) and opaque metadata.
type offsetCommitRequestBlock struct {
	offset    int64
	timestamp int64
	metadata  string
}

// encode writes the block in wire format. The timestamp field only exists in
// message version 1; for other versions a non-zero timestamp is dropped with a warning.
func (r *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
	pe.putInt64(r.offset)
	if version == 1 {
		pe.putInt64(r.timestamp)
	} else if r.timestamp != 0 {
		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
	}

	return pe.putString(r.metadata)
}

// decode reads the block in wire format, honoring the same version gating as encode.
func (r *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
	if r.offset, err = pd.getInt64(); err != nil {
		return err
	}
	if version == 1 {
		if r.timestamp, err = pd.getInt64(); err != nil {
			return err
		}
	}
	r.metadata, err = pd.getString()
	return err
}

type OffsetCommitRequest struct {
	ConsumerGroup           string
	ConsumerGroupGeneration int32 // v1 or later
	ConsumerID              string // v1 or later
	RetentionTime           int64 // v2 or later

	// Version can be:
	// - 0 (kafka 0.8.1 and later)
	// - 1 (kafka 0.8.2 and later)
	// - 2 (kafka 0.8.3 and later)
	Version int16
	// blocks maps topic -> partition -> commit payload.
	blocks map[string]map[int32]*offsetCommitRequestBlock
}

// encode writes the request. Fields that do not exist in the selected Version
// are either skipped (with a warning when set) or rejected outright.
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
	if r.Version < 0 || r.Version > 2 {
		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
	}

	if err := pe.putString(r.ConsumerGroup); err != nil {
		return err
	}

	if r.Version >= 1 {
		pe.putInt32(r.ConsumerGroupGeneration)
		if err := pe.putString(r.ConsumerID); err != nil {
			return err
		}
	} else {
		if r.ConsumerGroupGeneration != 0 {
			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
		}
		if r.ConsumerID != "" {
			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
		}
	}

	if r.Version >= 2 {
		pe.putInt64(r.RetentionTime)
	} else if r.RetentionTime != 0 {
		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
	}

	if err := pe.putArrayLength(len(r.blocks)); err != nil {
		return err
	}
	for topic, partitions := range r.blocks {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, block := range partitions {
			pe.putInt32(partition)
			if err := block.encode(pe, r.Version); err != nil {
				return err
			}
		}
	}
	return nil
}

// decode reads the request. Note: r.Version must already be set by the caller;
// the version is not part of the payload itself.
func (r *OffsetCommitRequest) decode(pd packetDecoder) (err error) {
	if r.ConsumerGroup, err = pd.getString(); err != nil {
		return err
	}

	if r.Version >= 1 {
		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
			return err
		}
		if r.ConsumerID, err = pd.getString(); err != nil {
			return err
		}
	}

	if r.Version >= 2 {
		if r.RetentionTime, err = pd.getInt64(); err != nil {
			return err
		}
	}

	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}
	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			block := &offsetCommitRequestBlock{}
			if err := block.decode(pd, r.Version); err != nil {
				return err
			}
			r.blocks[topic][partition] = block
		}
	}
	return nil
}

// key returns the Kafka API key for OffsetCommit.
func (r *OffsetCommitRequest) key() int16 {
	return 8
}

func (r *OffsetCommitRequest) version() int16 {
	return r.Version
}

// AddBlock registers an offset commit for topic/partitionID, creating the
// nested maps on first use.
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
	if r.blocks == nil {
		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
	}

	if r.blocks[topic] ==
nil {
		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
	}

	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go
new file mode 100644
index 000000000..afc25b7b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go
@@ -0,0 +1,90 @@
package sarama

import "testing"

// Golden wire-format fixtures for each OffsetCommitRequest version.
var (
	offsetCommitRequestNoBlocksV0 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestNoBlocksV1 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestNoBlocksV2 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestOneBlockV0 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}

	offsetCommitRequestOneBlockV1 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}

	offsetCommitRequestOneBlockV2 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
)

func TestOffsetCommitRequestV0(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.Version = 0
	request.ConsumerGroup = "foobar"
	testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)

	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
	testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
}

func TestOffsetCommitRequestV1(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.ConsumerGroup = "foobar"
	request.ConsumerID = "cons"
	request.ConsumerGroupGeneration = 0x1122
	request.Version = 1
	testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)

	// ReceiveTime (-1) encodes as all-FF in the v1 timestamp field.
	request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
	testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
}

func TestOffsetCommitRequestV2(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.ConsumerGroup = "foobar"
	request.ConsumerID = "cons"
	request.ConsumerGroupGeneration = 0x1122
	request.RetentionTime = 0x4433
	request.Version = 2
	testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)

	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
	testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 000000000..573a3b6a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,73 @@
package sarama

type OffsetCommitResponse struct {
	// Errors maps topic -> partition -> per-partition result code.
	Errors map[string]map[int32]KError
}

// AddError records the result code for topic/partition, creating the nested
// maps on first use.
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
	if r.Errors == nil {
		r.Errors = make(map[string]map[int32]KError)
	}
	partitions := r.Errors[topic]
	if partitions == nil {
		partitions = make(map[int32]KError)
		r.Errors[topic] = partitions
	}
	partitions[partition] = kerror
}

// encode writes the response in wire format (topic/partition/error triples).
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
	if err := pe.putArrayLength(len(r.Errors)); err != nil {
		return err
	}
	for topic, partitions := range r.Errors {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, kerror := range partitions {
			pe.putInt32(partition)
			pe.putInt16(int16(kerror))
		}
	}
	return nil
}

// decode reads the response; an empty topic list leaves r.Errors nil.
func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) {
	numTopics, err := pd.getArrayLength()
	if err != nil || numTopics == 0 {
		return err
	}

	r.Errors = make(map[string]map[int32]KError, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numErrors, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Errors[name] = make(map[int32]KError, numErrors)

		for j := 0; j < numErrors; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			tmp, err := pd.getInt16()
			if err != nil {
				return err
			}
			r.Errors[name][id] = KError(tmp)
		}
	}

	return nil
}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go
new file mode 100644
index 000000000..074ec9232
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go
@@ -0,0 +1,24 @@
package sarama

import (
	"testing"
)

var (
	emptyOffsetCommitResponse = []byte{
		0x00, 0x00, 0x00, 0x00}
)

func TestEmptyOffsetCommitResponse(t *testing.T) {
	response := OffsetCommitResponse{}
	testResponse(t, "empty", &response, emptyOffsetCommitResponse)
}

func
TestNormalOffsetCommitResponse(t *testing.T) { + response := OffsetCommitResponse{} + response.AddError("t", 0, ErrNotLeaderForPartition) + response.Errors["m"] = make(map[int32]KError) + // The response encoded form cannot be checked for it varies due to + // unpredictable map traversal order. + testResponse(t, "normal", &response, nil) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 000000000..30bbbbbd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,71 @@ +package sarama + +type OffsetFetchRequest struct { + ConsumerGroup string + Version int16 + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 1 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return err + } + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder) (err error) { + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + if partitionCount == 0 { + return nil + } + r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + 
return r.Version +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go new file mode 100644 index 000000000..025d725c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go @@ -0,0 +1,31 @@ +package sarama + +import "testing" + +var ( + offsetFetchRequestNoGroupNoPartitions = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + offsetFetchRequestNoPartitions = []byte{ + 0x00, 0x04, 'b', 'l', 'a', 'h', + 0x00, 0x00, 0x00, 0x00} + + offsetFetchRequestOnePartition = []byte{ + 0x00, 0x04, 'b', 'l', 'a', 'h', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', + 0x00, 0x00, 0x00, 0x01, + 0x4F, 0x4F, 0x4F, 0x4F} +) + +func TestOffsetFetchRequest(t *testing.T) { + request := new(OffsetFetchRequest) + testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions) + + request.ConsumerGroup = "blah" + testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions) + + request.AddPartition("topicTheFirst", 0x4F4F4F4F) + testRequest(t, "one partition", request, offsetFetchRequestOnePartition) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 000000000..93078c350 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,131 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + Metadata string + Err KError +} + +func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + r.Offset, err = 
pd.getInt64() + if err != nil { + return err + } + + r.Metadata, err = pd.getString() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(tmp) + + return nil +} + +func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(r.Offset) + + err = pe.putString(r.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(r.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Blocks map[string]map[int32]*OffsetFetchResponseBlock +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] 
== nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go new file mode 100644 index 000000000..7614ae424 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go @@ -0,0 +1,22 @@ +package sarama + +import "testing" + +var ( + emptyOffsetFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} +) + +func TestEmptyOffsetFetchResponse(t *testing.T) { + response := OffsetFetchResponse{} + testResponse(t, "empty", &response, emptyOffsetFetchResponse) +} + +func TestNormalOffsetFetchResponse(t *testing.T) { + response := OffsetFetchResponse{} + response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut}) + response.Blocks["m"] = nil + // The response encoded form cannot be checked for it varies due to + // unpredictable map traversal order. 
+ testResponse(t, "normal", &response, nil) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 000000000..842d5c0f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,113 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 +} + +func (r *offsetRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(int64(r.time)) + pe.putInt32(r.maxOffsets) + return nil +} + +func (r *offsetRequestBlock) decode(pd packetDecoder) (err error) { + if r.time, err = pd.getInt64(); err != nil { + return err + } + if r.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +type OffsetRequest struct { + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + pe.putInt32(-1) // replica ID is always -1 for clients + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder) error { + // Ignore replica ID + if _, err := pd.getInt32(); err != nil { + return err + } + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; 
j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return 0 +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + tmp.maxOffsets = maxOffsets + + r.blocks[topic][partitionID] = tmp +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go new file mode 100644 index 000000000..f3b3046bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go @@ -0,0 +1,26 @@ +package sarama + +import "testing" + +var ( + offsetRequestNoBlocks = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00} + + offsetRequestOneBlock = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02} +) + +func TestOffsetRequest(t *testing.T) { + request := new(OffsetRequest) + testRequest(t, "no blocks", request, offsetRequestNoBlocks) + + request.AddBlock("foo", 4, 1, 2) + testRequest(t, "one block", request, offsetRequestOneBlock) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 000000000..07d71ca72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,130 @@ +package sarama + 
+type OffsetResponseBlock struct { + Err KError + Offsets []int64 +} + +func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(tmp) + + r.Offsets, err = pd.getInt64Array() + + return err +} + +func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(r.Err)) + + return pe.putInt64Array(r.Offsets) +} + +type OffsetResponse struct { + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err 
+ } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go new file mode 100644 index 000000000..a427cbd20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go @@ -0,0 +1,62 @@ +package sarama + +import "testing" + +var ( + emptyOffsetResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} + + normalOffsetResponse = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x01, 'a', + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x01, 'z', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} +) + +func TestEmptyOffsetResponse(t *testing.T) { + response := OffsetResponse{} + + testDecodable(t, "empty", &response, emptyOffsetResponse) + if len(response.Blocks) != 0 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") + } +} + +func TestNormalOffsetResponse(t *testing.T) { + response := OffsetResponse{} + + testDecodable(t, "normal", &response, normalOffsetResponse) + + if len(response.Blocks) != 2 { + t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") + } + + if len(response.Blocks["a"]) != 0 { + t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") + } + + if 
len(response.Blocks["z"]) != 1 { + t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") + } + + if response.Blocks["z"][2].Err != ErrNoError { + t.Fatal("Decoding produced invalid error for topic z partition 2.") + } + + if len(response.Blocks["z"][2].Offsets) != 2 { + t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") + } + + if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { + t.Fatal("Decoding produced invalid offsets for topic z partition 2.") + } + +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 000000000..034222313 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,44 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. +type packetDecoder interface { + // Primitives + getInt8() (int8, error) + getInt16() (int16, error) + getInt32() (int32, error) + getInt64() (int64, error) + getArrayLength() (int, error) + + // Collections + getBytes() ([]byte, error) + getString() (string, error) + getInt32Array() ([]int32, error) + getInt64Array() ([]int64, error) + + // Subsets + remaining() int + getSubset(length int) (packetDecoder, error) + + // Stacks, see PushDecoder + push(in pushDecoder) error + pop() error +} + +// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity +// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where +// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they +// depend upon have been decoded. 
+type pushDecoder interface { + // Saves the offset into the input buffer as the location to actually read the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and check the field. + // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes + // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. + check(curOffset int, buf []byte) error +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go new file mode 100644 index 000000000..2c5710938 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go @@ -0,0 +1,41 @@ +package sarama + +// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. +// Types implementing Encoder only need to worry about calling methods like PutString, +// not about how a string is represented in Kafka. +type packetEncoder interface { + // Primitives + putInt8(in int8) + putInt16(in int16) + putInt32(in int32) + putInt64(in int64) + putArrayLength(in int) error + + // Collections + putBytes(in []byte) error + putRawBytes(in []byte) error + putString(in string) error + putInt32Array(in []int32) error + putInt64Array(in []int64) error + + // Stacks, see PushEncoder + push(in pushEncoder) + pop() error +} + +// PushEncoder is the interface for encoding fields like CRCs and lengths where the value +// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where +// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they +// depend upon have been written. 
+type pushEncoder interface { + // Saves the offset into the input buffer as the location to actually write the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and write the field. + // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes + // of data to the saved offset, based on the data between the saved offset and curOffset. + run(curOffset int, buf []byte) error +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go new file mode 100644 index 000000000..493ee0ec3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go @@ -0,0 +1,120 @@ +package sarama + +import ( + "hash" + "hash/fnv" + "math/rand" + "time" +) + +// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], +// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided +// as simple default implementations. +type Partitioner interface { + Partition(message *ProducerMessage, numPartitions int32) (int32, error) // Partition takes a message and partition count and chooses a partition + + // RequiresConsistency indicates to the user of the partitioner whether the mapping of key->partition is consistent or not. + // Specifically, if a partitioner requires consistency then it must be allowed to choose from all partitions (even ones known to + // be unavailable), and its choice must be respected by the caller. The obvious example is the HashPartitioner. + RequiresConsistency() bool +} + +// PartitionerConstructor is the type for a function capable of constructing new Partitioners. 
+type PartitionerConstructor func(topic string) Partitioner + +type manualPartitioner struct{} + +// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided +// ProducerMessage's Partition field as the partition to produce to. +func NewManualPartitioner(topic string) Partitioner { + return new(manualPartitioner) +} + +func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return message.Partition, nil +} + +func (p *manualPartitioner) RequiresConsistency() bool { + return true +} + +type randomPartitioner struct { + generator *rand.Rand +} + +// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. +func NewRandomPartitioner(topic string) Partitioner { + p := new(randomPartitioner) + p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + return p +} + +func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return int32(p.generator.Intn(int(numPartitions))), nil +} + +func (p *randomPartitioner) RequiresConsistency() bool { + return false +} + +type roundRobinPartitioner struct { + partition int32 +} + +// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. +func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil, or fails to +// encode, then a random partition is chosen. 
Otherwise the FNV-1a hash of the encoded bytes of the message key +// is used, modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. +func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + hash := int32(p.hasher.Sum32()) + if hash < 0 { + hash = -hash + } + return hash % numPartitions, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go new file mode 100644 index 000000000..f44c509d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go @@ -0,0 +1,198 @@ +package sarama + +import ( + "crypto/rand" + "log" + "testing" +) + +func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { + choice, err := partitioner.Partition(message, numPartitions) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= numPartitions { + t.Error(partitioner, "returned partition", choice, "outside of range for", message) + } + for i := 1; i < 50; i++ { + newChoice, err := partitioner.Partition(message, numPartitions) + if err != nil { + t.Error(partitioner, err) + } + if newChoice != choice { + t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") + } + } +} + +func TestRandomPartitioner(t *testing.T) { + partitioner := 
NewRandomPartitioner("mytopic") + + choice, err := partitioner.Partition(nil, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + for i := 1; i < 50; i++ { + choice, err := partitioner.Partition(nil, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= 50 { + t.Error("Returned partition", choice, "outside of range.") + } + } +} + +func TestRoundRobinPartitioner(t *testing.T) { + partitioner := NewRoundRobinPartitioner("mytopic") + + choice, err := partitioner.Partition(nil, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + var i int32 + for i = 1; i < 50; i++ { + choice, err := partitioner.Partition(nil, 7) + if err != nil { + t.Error(partitioner, err) + } + if choice != i%7 { + t.Error("Returned partition", choice, "expecting", i%7) + } + } +} + +func TestHashPartitioner(t *testing.T) { + partitioner := NewHashPartitioner("mytopic") + + choice, err := partitioner.Partition(&ProducerMessage{}, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + for i := 1; i < 50; i++ { + choice, err := partitioner.Partition(&ProducerMessage{}, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= 50 { + t.Error("Returned partition", choice, "outside of range for nil key.") + } + } + + buf := make([]byte, 256) + for i := 1; i < 50; i++ { + if _, err := rand.Read(buf); err != nil { + t.Error(err) + } + assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) + } +} + +func TestManualPartitioner(t *testing.T) { + partitioner := NewManualPartitioner("mytopic") + + choice, err := partitioner.Partition(&ProducerMessage{}, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned 
non-zero partition when only one available.") + } + + for i := int32(1); i < 50; i++ { + choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice != i { + t.Error("Returned partition not the same as the input partition") + } + } +} + +// By default, Sarama uses the message's key to consistently assign a partition to +// a message using hashing. If no key is set, a random partition will be chosen. +// This example shows how you can partition messages randomly, even when a key is set, +// by overriding Config.Producer.Partitioner. +func ExamplePartitioner_random() { + config := NewConfig() + config.Producer.Partitioner = NewRandomPartitioner + + producer, err := NewSyncProducer([]string{"localhost:9092"}, config) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Println("Failed to close producer:", err) + } + }() + + msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Fatalln("Failed to produce message to kafka cluster.") + } + + log.Printf("Produced message to partition %d with offset %d", partition, offset) +} + +// This example shows how to assign partitions to your messages manually. +func ExamplePartitioner_manual() { + config := NewConfig() + + // First, we tell the producer that we are going to partition ourselves. + config.Producer.Partitioner = NewManualPartitioner + + producer, err := NewSyncProducer([]string{"localhost:9092"}, config) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Println("Failed to close producer:", err) + } + }() + + // Now, we set the Partition field of the ProducerMessage struct. 
+ msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} + + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Fatalln("Failed to produce message to kafka cluster.") + } + + if partition != 6 { + log.Fatal("Message should have been produced to partition 6!") + } + + log.Printf("Produced message to partition %d with offset %d", partition, offset) +} + +// This example shows how to set a different partitioner depending on the topic. +func ExamplePartitioner_per_topic() { + config := NewConfig() + config.Producer.Partitioner = func(topic string) Partitioner { + switch topic { + case "access_log", "error_log": + return NewRandomPartitioner(topic) + + default: + return NewHashPartitioner(topic) + } + } + + // ... +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 000000000..ddeef780e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,95 @@ +package sarama + +import ( + "fmt" + "math" +) + +type prepEncoder struct { + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length += 1 +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return 
PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + pe.length += in.reserveLength() +} + +func (pe *prepEncoder) pop() error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go new file mode 100644 index 000000000..f21956137 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go @@ -0,0 +1,148 @@ +package sarama + +// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements +// it must see before responding. Any of the constants defined here are valid. On broker versions +// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many +// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced +// by setting the `min.isr` value in the brokers configuration). +type RequiredAcks int16 + +const ( + // NoResponse doesn't send any response, the TCP ACK is all you get. + NoResponse RequiredAcks = 0 + // WaitForLocal waits for only the local commit to succeed before responding. + WaitForLocal RequiredAcks = 1 + // WaitForAll waits for all replicas to commit before responding. 
+ WaitForAll RequiredAcks = -1 +) + +type ProduceRequest struct { + RequiredAcks RequiredAcks + Timeout int32 + msgSets map[string]map[int32]*MessageSet +} + +func (p *ProduceRequest) encode(pe packetEncoder) error { + pe.putInt16(int16(p.RequiredAcks)) + pe.putInt32(p.Timeout) + err := pe.putArrayLength(len(p.msgSets)) + if err != nil { + return err + } + for topic, partitions := range p.msgSets { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, msgSet := range partitions { + pe.putInt32(id) + pe.push(&lengthField{}) + err = msgSet.encode(pe) + if err != nil { + return err + } + err = pe.pop() + if err != nil { + return err + } + } + } + return nil +} + +func (p *ProduceRequest) decode(pd packetDecoder) error { + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + p.RequiredAcks = RequiredAcks(requiredAcks) + if p.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + p.msgSets = make(map[string]map[int32]*MessageSet) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + p.msgSets[topic] = make(map[int32]*MessageSet) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + messageSetSize, err := pd.getInt32() + if err != nil { + return err + } + if messageSetSize == 0 { + continue + } + msgSetDecoder, err := pd.getSubset(int(messageSetSize)) + if err != nil { + return err + } + msgSet := &MessageSet{} + err = msgSet.decode(msgSetDecoder) + if err != nil { + return err + } + p.msgSets[topic][partition] = msgSet + } + } + return nil +} + +func (p *ProduceRequest) key() int16 { + return 0 +} + +func (p *ProduceRequest) version() int16 { + return 
0 +} + +func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + if p.msgSets == nil { + p.msgSets = make(map[string]map[int32]*MessageSet) + } + + if p.msgSets[topic] == nil { + p.msgSets[topic] = make(map[int32]*MessageSet) + } + + set := p.msgSets[topic][partition] + + if set == nil { + set = new(MessageSet) + p.msgSets[topic][partition] = set + } + + set.addMessage(msg) +} + +func (p *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + if p.msgSets == nil { + p.msgSets = make(map[string]map[int32]*MessageSet) + } + + if p.msgSets[topic] == nil { + p.msgSets[topic] = make(map[int32]*MessageSet) + } + + p.msgSets[topic][partition] = set +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go new file mode 100644 index 000000000..21f4ba5b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go @@ -0,0 +1,47 @@ +package sarama + +import ( + "testing" +) + +var ( + produceRequestEmpty = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + produceRequestHeader = []byte{ + 0x01, 0x23, + 0x00, 0x00, 0x04, 0x44, + 0x00, 0x00, 0x00, 0x00} + + produceRequestOneMessage = []byte{ + 0x01, 0x23, + 0x00, 0x00, 0x04, 0x44, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0xAD, + 0x00, 0x00, 0x00, 0x1C, + // messageSet + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, + // message + 0x23, 0x96, 0x4a, 0xf7, // CRC + 0x00, + 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} +) + +func TestProduceRequest(t *testing.T) { + request := new(ProduceRequest) + testRequest(t, "empty", request, produceRequestEmpty) + + request.RequiredAcks = 0x123 + request.Timeout = 0x444 + testRequest(t, "header", request, produceRequestHeader) + + request.AddMessage("topic", 0xAD, 
&Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) + testRequest(t, "one message", request, produceRequestOneMessage) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go new file mode 100644 index 000000000..1f49a8560 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,112 @@ +package sarama + +type ProduceResponseBlock struct { + Err KError + Offset int64 +} + +func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pr.Err = KError(tmp) + + pr.Offset, err = pd.getInt64() + if err != nil { + return err + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock +} + +func (pr *ProduceResponse) decode(pd packetDecoder) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + pr.Blocks[name][id] = block + } + } + + return nil +} + +func (pr *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(pr.Blocks)) + if err != nil { + return err + } + for topic, partitions := range pr.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + pe.putInt16(int16(prb.Err)) 
+ pe.putInt64(prb.Offset) + } + } + return nil +} + +func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if pr.Blocks == nil { + return nil + } + + if pr.Blocks[topic] == nil { + return nil + } + + return pr.Blocks[topic][partition] +} + +// Testing API + +func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if pr.Blocks == nil { + pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := pr.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + pr.Blocks[topic] = byTopic + } + byTopic[partition] = &ProduceResponseBlock{Err: err} +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go new file mode 100644 index 000000000..5c3131af4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go @@ -0,0 +1,67 @@ +package sarama + +import "testing" + +var ( + produceResponseNoBlocks = []byte{ + 0x00, 0x00, 0x00, 0x00} + + produceResponseManyBlocks = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} +) + +func TestProduceResponse(t *testing.T) { + response := ProduceResponse{} + + testDecodable(t, "no blocks", &response, produceResponseNoBlocks) + if len(response.Blocks) != 0 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were none") + } + + testDecodable(t, "many blocks", &response, produceResponseManyBlocks) + if len(response.Blocks) != 2 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were 2") + } + if len(response.Blocks["foo"]) != 0 { + t.Error("Decoding produced", 
len(response.Blocks["foo"]), "partitions for 'foo' where there were none") + } + if len(response.Blocks["bar"]) != 2 { + t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two") + } + block := response.GetBlock("bar", 1) + if block == nil { + t.Error("Decoding did not produce a block for bar/1") + } else { + if block.Err != ErrNoError { + t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err)) + } + if block.Offset != 0xFF { + t.Error("Decoding failed for bar/1/Offset, got:", block.Offset) + } + } + block = response.GetBlock("bar", 2) + if block == nil { + t.Error("Decoding did not produce a block for bar/2") + } else { + if block.Err != ErrInvalidMessage { + t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err)) + } + if block.Offset != 0 { + t.Error("Decoding failed for bar/2/Offset, got:", block.Offset) + } + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 000000000..b194b9bcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,225 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off += 1 + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return 
tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, PacketDecodingError{"invalid array length"} + } + return tmp, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + + if err != nil { + return nil, err + } + + n := int(tmp) + + switch { + case n < -1: + return nil, PacketDecodingError{"invalid byteslice length"} + case n == -1: + return nil, nil + case n == 0: + return make([]byte, 0), nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + tmpStr := rd.raw[rd.off : rd.off+n] + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getString() (string, error) { + tmp, err := rd.getInt16() + + if err != nil { + return "", err + } + + n := int(tmp) + + switch { + case n < -1: + return "", PacketDecodingError{"invalid string length"} + case n == -1: + return "", nil + case n == 0: + return "", nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return "", ErrInsufficientData + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, 
PacketDecodingError{"invalid array length"} + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, PacketDecodingError{"invalid array length"} + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return &realDecoder{raw: rd.raw[start:rd.off]}, nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + reserve := in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 000000000..947ce98d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,100 @@ +package sarama + +import "encoding/binary" + +type realEncoder struct { + raw []byte 
+ off int + stack []pushEncoder +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off += 1 +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/request.go new file mode 
100644 index 000000000..d6d5cdfcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/request.go @@ -0,0 +1,100 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type requestBody interface { + encoder + decoder + key() int16 + version() int16 +} + +type request struct { + correlationID int32 + clientID string + body requestBody +} + +func (r *request) encode(pe packetEncoder) (err error) { + pe.push(&lengthField{}) + pe.putInt16(r.body.key()) + pe.putInt16(r.body.version()) + pe.putInt32(r.correlationID) + err = pe.putString(r.clientID) + if err != nil { + return err + } + err = r.body.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (r *request) decode(pd packetDecoder) (err error) { + var key int16 + if key, err = pd.getInt16(); err != nil { + return err + } + var version int16 + if version, err = pd.getInt16(); err != nil { + return err + } + if r.correlationID, err = pd.getInt32(); err != nil { + return err + } + r.clientID, err = pd.getString() + + r.body = allocateBody(key, version) + if r.body == nil { + return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} + } + return r.body.decode(pd) +} + +func decodeRequest(r io.Reader) (req *request, err error) { + lengthBytes := make([]byte, 4) + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, err + } + + length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { + return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, err + } + + req = &request{} + if err := decode(encodedReq, req); err != nil { + return nil, err + } + return req, nil +} + +func allocateBody(key, version int16) requestBody { + switch key { + case 0: + return &ProduceRequest{} + case 1: + return &FetchRequest{} + case 2: + return &OffsetRequest{} + case 
3: + return &MetadataRequest{} + case 8: + return &OffsetCommitRequest{Version: version} + case 9: + return &OffsetFetchRequest{} + case 10: + return &ConsumerMetadataRequest{} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go new file mode 100644 index 000000000..69e8b4cbe --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go @@ -0,0 +1,80 @@ +package sarama + +import ( + "bytes" + "reflect" + "testing" +) + +type testRequestBody struct { +} + +func (s *testRequestBody) key() int16 { + return 0x666 +} + +func (s *testRequestBody) version() int16 { + return 0xD2 +} + +func (s *testRequestBody) encode(pe packetEncoder) error { + return pe.putString("abc") +} + +// not specific to request tests, just helper functions for testing structures that +// implement the encoder or decoder interfaces that needed somewhere to live + +func testEncodable(t *testing.T, name string, in encoder, expect []byte) { + packet, err := encode(in) + if err != nil { + t.Error(err) + } else if !bytes.Equal(packet, expect) { + t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) + } +} + +func testDecodable(t *testing.T, name string, out decoder, in []byte) { + err := decode(in, out) + if err != nil { + t.Error("Decoding", name, "failed:", err) + } +} + +func testRequest(t *testing.T, name string, rb requestBody, expected []byte) { + // Encoder request + req := &request{correlationID: 123, clientID: "foo", body: rb} + packet, err := encode(req) + headerSize := 14 + len("foo") + if err != nil { + t.Error(err) + } else if !bytes.Equal(packet[headerSize:], expected) { + t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expected) + } + // Decoder request + decoded, err := decodeRequest(bytes.NewReader(packet)) + if err != nil { + t.Error("Failed to decode request", err) + } else if decoded.correlationID != 123 || decoded.clientID 
!= "foo" { + t.Errorf("Decoded header is not valid: %v", decoded) + } else if !reflect.DeepEqual(rb, decoded.body) { + t.Errorf("Decoded request does not match the encoded one\nencoded: %v\ndecoded: %v", rb, decoded) + } +} + +func testResponse(t *testing.T, name string, res encoder, expected []byte) { + encoded, err := encode(res) + if err != nil { + t.Error(err) + } else if expected != nil && !bytes.Equal(encoded, expected) { + t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) + } + + decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(decoder) + if err := decode(encoded, decoded); err != nil { + t.Error("Decoding", name, "failed:", err) + } + + if !reflect.DeepEqual(decoded, res) { + t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go new file mode 100644 index 000000000..f3f4d27d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go @@ -0,0 +1,21 @@ +package sarama + +import "fmt" + +type responseHeader struct { + length int32 + correlationID int32 +} + +func (r *responseHeader) decode(pd packetDecoder) (err error) { + r.length, err = pd.getInt32() + if err != nil { + return err + } + if r.length <= 4 || r.length > MaxResponseSize { + return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} + } + + r.correlationID, err = pd.getInt32() + return err +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go new file mode 100644 index 000000000..8f9fdb80c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go @@ -0,0 +1,21 @@ +package sarama + +import "testing" + +var ( + responseHeaderBytes = []byte{ + 0x00, 0x00, 
0x0f, 0x00, + 0x0a, 0xbb, 0xcc, 0xff} +) + +func TestResponseHeader(t *testing.T) { + header := responseHeader{} + + testDecodable(t, "response header", &header, responseHeaderBytes) + if header.length != 0xf00 { + t.Error("Decoding header length failed, got", header.length) + } + if header.correlationID != 0x0abbccff { + t.Error("Decoding header correlation id failed, got", header.correlationID) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go new file mode 100644 index 000000000..d59821750 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go @@ -0,0 +1,47 @@ +/* +Package sarama provides client libraries for the Kafka 0.8 protocol. The AsyncProducer object is the high-level +API for producing messages asynchronously; the SyncProducer provides a blocking API for the same purpose. +The Consumer object is the high-level API for consuming messages. The Client object provides metadata +management functionality that is shared between the higher-level objects. + +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire. + +The Request/Response objects and properties are mostly undocumented, as they line up exactly with the +protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol +*/ +package sarama + +import ( + "io/ioutil" + "log" +) + +// Logger is the instance of a StdLogger interface that Sarama writes connection +// management events to. By default it is set to discard all log messages via ioutil.Discard, +// but you can set it to redirect wherever you want. +var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + +// StdLogger is used to log error messages. 
+type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// PanicHandler is called for recovering from panics spawned internally to the library (and thus +// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. +var PanicHandler func(interface{}) + +// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying +// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned +// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt +// to process. +var MaxRequestSize int32 = 100 * 1024 * 1024 + +// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If +// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to +// protect the client from running out of memory. Please note that brokers do not have any natural limit on +// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers +// (see https://issues.apache.org/jira/browse/KAFKA-2063). 
+var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go new file mode 100644 index 000000000..e86cb7039 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go @@ -0,0 +1,41 @@ +package sarama + +import ( + "bytes" + "encoding/binary" + + "github.com/golang/snappy" +) + +var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0} + +// SnappyEncode encodes binary data +func snappyEncode(src []byte) []byte { + return snappy.Encode(nil, src) +} + +// SnappyDecode decodes snappy data +func snappyDecode(src []byte) ([]byte, error) { + if bytes.Equal(src[:8], snappyMagic) { + var ( + pos = uint32(16) + max = uint32(len(src)) + dst = make([]byte, 0, len(src)) + chunk []byte + err error + ) + for pos < max { + size := binary.BigEndian.Uint32(src[pos : pos+4]) + pos += 4 + + chunk, err = snappy.Decode(chunk, src[pos:pos+size]) + if err != nil { + return nil, err + } + pos += size + dst = append(dst, chunk...) 
+ } + return dst, nil + } + return snappy.Decode(nil, src) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go new file mode 100644 index 000000000..f3cf7ff5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go @@ -0,0 +1,49 @@ +package sarama + +import ( + "bytes" + "testing" +) + +var snappyTestCases = map[string][]byte{ + "REPEATREPEATREPEATREPEATREPEATREPEAT": []byte{36, 20, 82, 69, 80, 69, 65, 84, 118, 6, 0}, + "REALLY SHORT": []byte{12, 44, 82, 69, 65, 76, 76, 89, 32, 83, 72, 79, 82, 84}, + "AXBXCXDXEXFX": []byte{12, 44, 65, 88, 66, 88, 67, 88, 68, 88, 69, 88, 70, 88}, +} + +var snappyStreamTestCases = map[string][]byte{ + "PLAINDATA": []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 11, 9, 32, 80, 76, 65, 73, 78, 68, 65, 84, 65}, + `{"a":"UtaitILHMDAAAAfU","b":"日本"}`: []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 39, 37, 144, 123, 34, 97, 34, 58, 34, 85, 116, 97, 105, 116, 73, 76, 72, 77, 68, 65, 65, 65, 65, 102, 85, 34, 44, 34, 98, 34, 58, 34, 230, 151, 165, 230, 156, 172, 34, 125}, + `Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? 
Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias except`: []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 3, 89, 128, 8, 240, 90, 83, 101, 100, 32, 117, 116, 32, 112, 101, 114, 115, 112, 105, 99, 105, 97, 116, 105, 115, 32, 117, 110, 100, 101, 32, 111, 109, 110, 105, 115, 32, 105, 115, 116, 101, 32, 110, 97, 116, 117, 115, 32, 101, 114, 114, 111, 114, 32, 115, 105, 116, 32, 118, 111, 108, 117, 112, 116, 97, 116, 101, 109, 32, 97, 99, 99, 117, 115, 97, 110, 116, 105, 117, 109, 32, 100, 111, 108, 111, 114, 101, 109, 113, 117, 101, 32, 108, 97, 117, 100, 97, 5, 22, 240, 60, 44, 32, 116, 111, 116, 97, 109, 32, 114, 101, 109, 32, 97, 112, 101, 114, 105, 97, 109, 44, 32, 101, 97, 113, 117, 101, 32, 105, 112, 115, 97, 32, 113, 117, 97, 101, 32, 97, 98, 32, 105, 108, 108, 111, 32, 105, 110, 118, 101, 110, 116, 111, 114, 101, 32, 118, 101, 114, 105, 116, 97, 1, 141, 4, 101, 116, 1, 36, 88, 115, 105, 32, 97, 114, 99, 104, 105, 116, 101, 99, 116, 111, 32, 98, 101, 97, 116, 97, 101, 32, 118, 105, 1, 6, 120, 100, 105, 99, 116, 97, 32, 115, 117, 110, 116, 32, 101, 120, 112, 108, 105, 99, 97, 98, 111, 46, 32, 78, 101, 109, 111, 32, 101, 110, 105, 109, 5, 103, 0, 109, 46, 180, 0, 12, 113, 117, 105, 97, 17, 16, 0, 115, 5, 209, 72, 97, 115, 112, 101, 114, 110, 97, 116, 117, 114, 32, 97, 117, 116, 32, 111, 100, 105, 116, 5, 9, 36, 102, 117, 103, 105, 116, 44, 32, 115, 101, 100, 9, 53, 32, 99, 111, 110, 115, 101, 113, 117, 117, 110, 1, 42, 20, 109, 97, 103, 110, 105, 32, 9, 245, 16, 115, 32, 101, 111, 115, 1, 36, 28, 32, 114, 97, 116, 105, 111, 110, 101, 17, 96, 33, 36, 1, 51, 36, 105, 32, 110, 101, 115, 99, 105, 117, 110, 116, 1, 155, 1, 254, 16, 112, 111, 114, 114, 111, 1, 51, 36, 115, 113, 117, 97, 
109, 32, 101, 115, 116, 44, 1, 14, 13, 81, 5, 183, 4, 117, 109, 1, 18, 0, 97, 9, 19, 4, 32, 115, 1, 149, 12, 109, 101, 116, 44, 9, 135, 76, 99, 116, 101, 116, 117, 114, 44, 32, 97, 100, 105, 112, 105, 115, 99, 105, 32, 118, 101, 108, 50, 173, 0, 24, 110, 111, 110, 32, 110, 117, 109, 9, 94, 84, 105, 117, 115, 32, 109, 111, 100, 105, 32, 116, 101, 109, 112, 111, 114, 97, 32, 105, 110, 99, 105, 100, 33, 52, 20, 117, 116, 32, 108, 97, 98, 33, 116, 4, 101, 116, 9, 106, 0, 101, 5, 219, 20, 97, 109, 32, 97, 108, 105, 5, 62, 33, 164, 8, 114, 97, 116, 29, 212, 12, 46, 32, 85, 116, 41, 94, 52, 97, 100, 32, 109, 105, 110, 105, 109, 97, 32, 118, 101, 110, 105, 33, 221, 72, 113, 117, 105, 115, 32, 110, 111, 115, 116, 114, 117, 109, 32, 101, 120, 101, 114, 99, 105, 33, 202, 104, 111, 110, 101, 109, 32, 117, 108, 108, 97, 109, 32, 99, 111, 114, 112, 111, 114, 105, 115, 32, 115, 117, 115, 99, 105, 112, 105, 13, 130, 8, 105, 111, 115, 1, 64, 12, 110, 105, 115, 105, 1, 150, 5, 126, 44, 105, 100, 32, 101, 120, 32, 101, 97, 32, 99, 111, 109, 5, 192, 0, 99, 41, 131, 33, 172, 8, 63, 32, 81, 1, 107, 4, 97, 117, 33, 101, 96, 118, 101, 108, 32, 101, 117, 109, 32, 105, 117, 114, 101, 32, 114, 101, 112, 114, 101, 104, 101, 110, 100, 101, 114, 105, 65, 63, 12, 105, 32, 105, 110, 1, 69, 16, 118, 111, 108, 117, 112, 65, 185, 1, 47, 24, 105, 116, 32, 101, 115, 115, 101, 1, 222, 64, 109, 32, 110, 105, 104, 105, 108, 32, 109, 111, 108, 101, 115, 116, 105, 97, 101, 46, 103, 0, 0, 44, 1, 45, 16, 32, 105, 108, 108, 117, 37, 143, 45, 36, 0, 109, 5, 110, 65, 33, 20, 97, 116, 32, 113, 117, 111, 17, 92, 44, 115, 32, 110, 117, 108, 108, 97, 32, 112, 97, 114, 105, 9, 165, 24, 65, 116, 32, 118, 101, 114, 111, 69, 34, 44, 101, 116, 32, 97, 99, 99, 117, 115, 97, 109, 117, 115, 1, 13, 104, 105, 117, 115, 116, 111, 32, 111, 100, 105, 111, 32, 100, 105, 103, 110, 105, 115, 115, 105, 109, 111, 115, 32, 100, 117, 99, 105, 1, 34, 80, 113, 117, 105, 32, 98, 108, 97, 110, 100, 105, 116, 105, 105, 115, 32, 112, 114, 
97, 101, 115, 101, 101, 87, 17, 111, 56, 116, 117, 109, 32, 100, 101, 108, 101, 110, 105, 116, 105, 32, 97, 116, 65, 89, 28, 99, 111, 114, 114, 117, 112, 116, 105, 1, 150, 0, 115, 13, 174, 5, 109, 8, 113, 117, 97, 65, 5, 52, 108, 101, 115, 116, 105, 97, 115, 32, 101, 120, 99, 101, 112, 116, 0, 0, 0, 1, 0}, +} + +func TestSnappyEncode(t *testing.T) { + for src, exp := range snappyTestCases { + dst := snappyEncode([]byte(src)) + if !bytes.Equal(dst, exp) { + t.Errorf("Expected %s to generate %v, but was %v", src, exp, dst) + } + } +} + +func TestSnappyDecode(t *testing.T) { + for exp, src := range snappyTestCases { + dst, err := snappyDecode(src) + if err != nil { + t.Error("Encoding error: ", err) + } else if !bytes.Equal(dst, []byte(exp)) { + t.Errorf("Expected %s to be generated from %v, but was %s", exp, src, string(dst)) + } + } +} + +func TestSnappyDecodeStreams(t *testing.T) { + for exp, src := range snappyStreamTestCases { + dst, err := snappyDecode(src) + if err != nil { + t.Error("Encoding error: ", err) + } else if !bytes.Equal(dst, []byte(exp)) { + t.Errorf("Expected %s to be generated from [%d]byte, but was %s", exp, len(src), string(dst)) + } + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 000000000..b59d74a20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,94 @@ +package sarama + +import "sync" + +// SyncProducer publishes Kafka messages. It routes messages to the correct broker, refreshing metadata as appropriate, +// and parses responses for errors. You must call Close() on a producer to avoid leaks, it may not be garbage-collected automatically when +// it passes out of scope. +type SyncProducer interface { + + // SendMessage produces a given message, and returns only when it either has succeeded or failed to produce. 
+ // It will return the partition and the offset of the produced message, or an error if the message + // failed to produce. + SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) + + // Close shuts down the producer and flushes any messages it may have buffered. You must call this function before + // a producer object passes out of scope, as it may otherwise leak memory. You must call this before calling Close + // on the underlying client. + Close() error +} + +type syncProducer struct { + producer *asyncProducer + wg sync.WaitGroup +} + +// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. +func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + p, err := NewAsyncProducer(addrs, config) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. 
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + p.conf.Producer.Return.Successes = true + p.conf.Producer.Return.Errors = true + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + oldMetadata := msg.Metadata + defer func() { + msg.Metadata = oldMetadata + }() + + expectation := make(chan error, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err + } else { + return msg.Partition, msg.Offset, nil + } +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.Metadata.(chan error) + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.Metadata.(chan error) + expectation <- err.Err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go new file mode 100644 index 000000000..d378949b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go @@ -0,0 +1,149 @@ +package sarama + +import ( + "log" + "sync" + "testing" +) + +func TestSyncProducer(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + 
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + for i := 0; i < 10; i++ { + leader.Returns(prodSuccess) + } + + producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + msg := &ProducerMessage{ + Topic: "my_topic", + Value: StringEncoder(TestMessage), + Metadata: "test", + } + + partition, offset, err := producer.SendMessage(msg) + + if partition != 0 || msg.Partition != partition { + t.Error("Unexpected partition") + } + if offset != 0 || msg.Offset != offset { + t.Error("Unexpected offset") + } + if str, ok := msg.Metadata.(string); !ok || str != "test" { + t.Error("Unexpected metadata") + } + if err != nil { + t.Error(err) + } + } + + safeClose(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestConcurrentSyncProducer(t *testing.T) { + seedBroker := newMockBroker(t, 1) + leader := newMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 100 + producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + wg := sync.WaitGroup{} + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} + partition, _, err := producer.SendMessage(msg) + if partition != 0 { + t.Error("Unexpected partition") + } + if err != nil { + t.Error(err) + } + wg.Done() + }() + } + wg.Wait() + + safeClose(t, 
producer) + leader.Close() + seedBroker.Close() +} + +func TestSyncProducerToNonExistingTopic(t *testing.T) { + broker := newMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError) + broker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + config.Producer.Retry.Max = 0 + + producer, err := NewSyncProducer([]string{broker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + broker.Returns(metadataResponse) + + _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) + if err != ErrUnknownTopicOrPartition { + t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err) + } + + safeClose(t, producer) + broker.Close() +} + +// This example shows the basic usage pattern of the SyncProducer. +func ExampleSyncProducer() { + producer, err := NewSyncProducer([]string{"localhost:9092"}, nil) + if err != nil { + log.Fatalln(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Fatalln(err) + } + }() + + msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Printf("FAILED to send message: %s\n", err) + } else { + log.Printf("> message sent to partition %d at offset %d\n", partition, offset) + } +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md new file mode 100644 index 000000000..3464c4ad8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md @@ -0,0 +1,10 @@ +# Sarama tools + +This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation. 
+Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function. + +- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka custer. +- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster. +- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster. + +To install all tools, run `go get github.com/Shopify/sarama/tools/...` diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore new file mode 100644 index 000000000..67da9dfa9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore @@ -0,0 +1,2 @@ +kafka-console-consumer +kafka-console-consumer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md new file mode 100644 index 000000000..4e77f0b70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md @@ -0,0 +1,29 @@ +# kafka-console-consumer + +A simple command line tool to consume partitions of a topic and print the +messages on the standard output. + +### Installation + + go get github.com/Shopify/sarama/tools/kafka-console-consumer + +### Usage + + # Minimum invocation + kafka-console-consumer -topic=test -brokers=kafka1:9092 + + # It will pick up a KAFKA_PEERS environment variable + export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 + kafka-console-consumer -topic=test + + # You can specify the offset you want to start at. It can be either + # `oldest`, `newest`. The default is `newest`. 
+ kafka-console-consumer -topic=test -offset=oldest + kafka-console-consumer -topic=test -offset=newest + + # You can specify the partition(s) you want to consume as a comma-separated + # list. The default is `all`. + kafka-console-consumer -topic=test -partitions=1,2,3 + + # Display all command line options + kafka-console-consumer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go new file mode 100644 index 000000000..0f1eb89a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go @@ -0,0 +1,145 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/signal" + "strconv" + "strings" + "sync" + + "github.com/Shopify/sarama" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") + topic = flag.String("topic", "", "REQUIRED: the topic to consume") + partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") + offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`") + verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") + bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") + } + + if *topic == "" { + printUsageErrorAndExit("-topic is required") + } + + if *verbose { + sarama.Logger = logger + } + + var initialOffset int64 + switch *offset { + case "oldest": + initialOffset = sarama.OffsetOldest + case "newest": + initialOffset = sarama.OffsetNewest + default: + printUsageErrorAndExit("-offset should be `oldest` or `newest`") + } + + c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) + if err != nil { + printErrorAndExit(69, "Failed to start consumer: %s", err) + } + + partitionList, err := getPartitions(c) + if err != nil { + printErrorAndExit(69, "Failed to get the list of partitions: %s", err) + } + + var ( + messages = make(chan *sarama.ConsumerMessage, *bufferSize) + closing = make(chan struct{}) + wg sync.WaitGroup + ) + + go func() { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Kill, os.Interrupt) + <-signals + logger.Println("Initiating shutdown of consumer...") + close(closing) + }() + + for _, partition := range partitionList { + pc, err := c.ConsumePartition(*topic, partition, initialOffset) + if err != nil { + printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) + } + + go func(pc sarama.PartitionConsumer) { + <-closing + pc.AsyncClose() + }(pc) + + wg.Add(1) + go func(pc sarama.PartitionConsumer) { + defer wg.Done() + for message := range pc.Messages() { + messages <- message + } + }(pc) + } + + go func() { + for msg := range messages { + fmt.Printf("Partition:\t%d\n", msg.Partition) + fmt.Printf("Offset:\t%d\n", msg.Offset) + 
fmt.Printf("Key:\t%s\n", string(msg.Key)) + fmt.Printf("Value:\t%s\n", string(msg.Value)) + fmt.Println() + } + }() + + wg.Wait() + logger.Println("Done consuming topic", *topic) + close(messages) + + if err := c.Close(); err != nil { + logger.Println("Failed to close consumer: ", err) + } +} + +func getPartitions(c sarama.Consumer) ([]int32, error) { + if *partitions == "all" { + return c.Partitions(*topic) + } + + tmp := strings.Split(*partitions, ",") + var pList []int32 + for i := range tmp { + val, err := strconv.ParseInt(tmp[i], 10, 32) + if err != nil { + return nil, err + } + pList = append(pList, int32(val)) + } + + return pList, nil +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore new file mode 100644 index 000000000..5837fe8ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore @@ -0,0 +1,2 @@ +kafka-console-partitionconsumer +kafka-console-partitionconsumer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md new file mode 100644 index 000000000..646dd5f5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md @@ -0,0 +1,28 @@ +# 
kafka-console-partitionconsumer + +NOTE: this tool is deprecated in favour of the more general and more powerful +`kafka-console-consumer`. + +A simple command line tool to consume a partition of a topic and print the messages +on the standard output. + +### Installation + + go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer + +### Usage + + # Minimum invocation + kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 + + # It will pick up a KAFKA_PEERS environment variable + export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 + kafka-console-partitionconsumer -topic=test -partition=4 + + # You can specify the offset you want to start at. It can be either + # `oldest`, `newest`, or a specific offset number + kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest + kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 + + # Display all command line options + kafka-console-partitionconsumer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go new file mode 100644 index 000000000..d5e4464de --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go @@ -0,0 +1,102 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/signal" + "strconv" + "strings" + + "github.com/Shopify/sarama" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") + topic = flag.String("topic", "", "REQUIRED: the topic to consume") + partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") + offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") + verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") + } + + if *topic == "" { + printUsageErrorAndExit("-topic is required") + } + + if *partition == -1 { + printUsageErrorAndExit("-partition is required") + } + + if *verbose { + sarama.Logger = logger + } + + var ( + initialOffset int64 + offsetError error + ) + switch *offset { + case "oldest": + initialOffset = sarama.OffsetOldest + case "newest": + initialOffset = sarama.OffsetNewest + default: + initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) + } + + if offsetError != nil { + printUsageErrorAndExit("Invalid initial offset: %s", *offset) + } + + c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) + if err != nil { + printErrorAndExit(69, "Failed to start consumer: %s", err) + } + + pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) + if err != nil { + printErrorAndExit(69, "Failed to start partition consumer: %s", err) + } + + go func() { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Kill, os.Interrupt) + <-signals + pc.AsyncClose() + }() + + for msg := range pc.Messages() { + fmt.Printf("Offset:\t%d\n", msg.Offset) + fmt.Printf("Key:\t%s\n", string(msg.Key)) + fmt.Printf("Value:\t%s\n", string(msg.Value)) + fmt.Println() + } + + if err := c.Close(); err != nil { + logger.Println("Failed to close consumer: ", err) + } +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", 
fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore new file mode 100644 index 000000000..2b9e563a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore @@ -0,0 +1,2 @@ +kafka-console-producer +kafka-console-producer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md new file mode 100644 index 000000000..6b3a65f21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md @@ -0,0 +1,34 @@ +# kafka-console-producer + +A simple command line tool to produce a single message to Kafka. + +### Installation + + go get github.com/Shopify/sarama/tools/kafka-console-producer + + +### Usage + + # Minimum invocation + kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 + + # It will pick up a KAFKA_PEERS environment variable + export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 + kafka-console-producer -topic=test -value=value + + # It will read the value from stdin by using pipes + echo "hello world" | kafka-console-producer -topic=test + + # Specify a key: + echo "hello world" | kafka-console-producer -topic=test -key=key + + # Partitioning: by default, kafka-console-producer will partition as follows: + # - manual partitioning if a -partition is provided + # - hash partitioning by key if a -key is provided + # - random partioning otherwise. 
+ # + # You can override this using the -partitioner argument: + echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random + + # Display all command line options + kafka-console-producer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go new file mode 100644 index 000000000..6a1765d7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go @@ -0,0 +1,118 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/Shopify/sarama" +) + +var ( + brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") + topic = flag.String("topic", "", "REQUIRED: the topic to produce to") + key = flag.String("key", "", "The key of the message to produce. Can be empty.") + value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") + partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") + partition = flag.Int("partition", -1, "The partition to produce to.") + verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") + silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +func main() { + flag.Parse() + + if *brokerList == "" { + printUsageErrorAndExit("no -brokers specified. 
Alternatively, set the KAFKA_PEERS environment variable") + } + + if *topic == "" { + printUsageErrorAndExit("no -topic specified") + } + + if *verbose { + sarama.Logger = logger + } + + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll + + switch *partitioner { + case "": + if *partition >= 0 { + config.Producer.Partitioner = sarama.NewManualPartitioner + } else { + config.Producer.Partitioner = sarama.NewHashPartitioner + } + case "hash": + config.Producer.Partitioner = sarama.NewHashPartitioner + case "random": + config.Producer.Partitioner = sarama.NewRandomPartitioner + case "manual": + config.Producer.Partitioner = sarama.NewManualPartitioner + if *partition == -1 { + printUsageErrorAndExit("-partition is required when partitioning manually") + } + default: + printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) + } + + message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} + + if *key != "" { + message.Key = sarama.StringEncoder(*key) + } + + if *value != "" { + message.Value = sarama.StringEncoder(*value) + } else if stdinAvailable() { + bytes, err := ioutil.ReadAll(os.Stdin) + if err != nil { + printErrorAndExit(66, "Failed to read data from the standard input: %s", err) + } + message.Value = sarama.ByteEncoder(bytes) + } else { + printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") + } + + producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) + if err != nil { + printErrorAndExit(69, "Failed to open Kafka producer: %s", err) + } + defer func() { + if err := producer.Close(); err != nil { + logger.Println("Failed to close Kafka producer cleanly:", err) + } + }() + + partition, offset, err := producer.SendMessage(message) + if err != nil { + printErrorAndExit(69, "Failed to produce message: %s", err) + } else if !*silent { + fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) + } +} + 
+func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} + +func printUsageErrorAndExit(message string) { + fmt.Fprintln(os.Stderr, "ERROR:", message) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} + +func stdinAvailable() bool { + stat, _ := os.Stdin.Stat() + return (stat.Mode() & os.ModeCharDevice) == 0 +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go b/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go new file mode 100644 index 000000000..fef7c7381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go @@ -0,0 +1,89 @@ +package sarama + +import "sort" + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupeAndSort(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + for _, val := range input { + ret = append(ret, val) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents clobbering in goroutine + go withRecover(func() { + if connected, _ := tmp.Connected(); connected { + if err := tmp.Close(); err != nil { + Logger.Println("Error closing broker", tmp.ID(), ":", err) + } + } + }) +} + +// Encoder is a simple interface for any type that can be encoded as an array of bytes +// in order to be sent as the key or value of a Kafka message. 
Length() is provided as an +// optimization, and must return the same as len() on the result of Encode(). +type Encoder interface { + Encode() ([]byte, error) + Length() int +} + +// make strings and byte slices encodable for convenience so they can be used as keys +// and/or values in kafka messages + +// StringEncoder implements the Encoder interface for Go strings so that they can be used +// as the Key or Value in a ProducerMessage. +type StringEncoder string + +func (s StringEncoder) Encode() ([]byte, error) { + return []byte(s), nil +} + +func (s StringEncoder) Length() int { + return len(s) +} + +// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used +// as the Key or Value in a ProducerMessage. +type ByteEncoder []byte + +func (b ByteEncoder) Encode() ([]byte, error) { + return b, nil +} + +func (b ByteEncoder) Length() int { + return len(b) +} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh new file mode 100644 index 000000000..95e47dde4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -ex + +# Launch and wait for toxiproxy +${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & +while ! 
nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid +done diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf new file mode 100644 index 000000000..d975de438 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf @@ -0,0 +1,5 @@ +start on started zookeeper-ZK_PORT +stop on stopping zookeeper-ZK_PORT + +pre-start exec sleep 2 +exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh new file mode 100644 index 000000000..0a2d77785 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -ex + +apt-get update +yes | apt-get install default-jre + +export KAFKA_INSTALL_ROOT=/opt +export KAFKA_HOSTNAME=192.168.100.67 +export KAFKA_VERSION=0.8.2.1 +export REPOSITORY_ROOT=/vagrant + +sh /vagrant/vagrant/install_cluster.sh +sh /vagrant/vagrant/setup_services.sh +sh /vagrant/vagrant/create_topics.sh diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh new file mode 100644 index 000000000..e52c00e7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -ex + +${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & +PID=$! + +while ! 
nc -q 1 localhost 8474 + +# The number of threads handling network requests +num.network.threads=2 + +# The number of threads doing disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=1048576 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=1048576 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma seperated list of directories under which to store log files +log.dirs=KAFKA_DATADIR + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=2 + +# Create new topics with a replication factor of 2 so failover can be tested +# more easily. +default.replication.factor=2 + +auto.create.topics.enable=false +delete.topic.enable=true + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
+ +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining +# segments don't drop below log.retention.bytes. +log.retention.bytes=268435456 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=268435456 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=60000 + +# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires. +# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction. +log.cleaner.enable=false + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. 
+zookeeper.connect=localhost:ZK_PORT + +# Timeout in ms for connecting to zookeeper +zookeeper.session.timeout.ms=3000 +zookeeper.connection.timeout.ms=3000 diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh new file mode 100644 index 000000000..81d8ea05d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +set -ex + +stop toxiproxy || true +cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf +cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/ +start toxiproxy + +for i in 1 2 3 4 5; do + ZK_PORT=`expr $i + 2180` + KAFKA_PORT=`expr $i + 9090` + + stop zookeeper-${ZK_PORT} || true + + # set up zk service + cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf + sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf + + # set up kafka service + cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf + sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf + sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf + + start zookeeper-${ZK_PORT} +done + +# Wait for the last kafka node to finish booting +while ! nc -q 1 localhost 29095  [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. 
The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. 
We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` + + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. 
| +| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. 
+log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&logrus.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). 
+ + ```go + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) + ``` + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ÅÌ Í•Í–ÌšfÌÍÌ  ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗lÍ–ÍŽg̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. 
+ + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..04673a075 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,256 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 000000000..98717df49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 000000000..a1623ec00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() 
+ if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 000000000..cb5759a35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..a67e1b802 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,188 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of 
the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. 
+func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. 
+func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..104d689f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. 
If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000..c6d290c77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func 
BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 000000000..8ea93ddf2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,56 @@ +package logstash + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" +) + +// Formatter generates json in logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty use for logstash type field. + + // TimestampFormat sets the format used for timestamps. 
+ TimestampFormat string +} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + entry.Data["@version"] = 1 + + if f.TimestampFormat == "" { + f.TimestampFormat = logrus.DefaultTimestampFormat + } + + entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) + + // set message field + v, ok := entry.Data["message"] + if ok { + entry.Data["fields.message"] = v + } + entry.Data["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = v + } + entry.Data["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + entry.Data["fields.type"] = v + } + entry.Data["type"] = f.Type + } + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go new file mode 100644 index 000000000..d8814a0ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go @@ -0,0 +1,52 @@ +package logstash + +import ( + "bytes" + "encoding/json" + "github.com/Sirupsen/logrus" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestLogstashFormatter(t *testing.T) { + assert := assert.New(t) + + lf := LogstashFormatter{Type: "abc"} + + fields := logrus.Fields{ + "message": "def", + "level": "ijk", + "type": "lmn", + "one": 1, + "pi": 3.14, + "bool": true, + } + + entry := logrus.WithFields(fields) + entry.Message = "msg" + entry.Level = logrus.InfoLevel + + b, _ := lf.Format(entry) + + var data map[string]interface{} + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + dec.Decode(&data) + + // base fields + assert.Equal(json.Number("1"), data["@version"]) + 
assert.NotEmpty(data["@timestamp"]) + assert.Equal("abc", data["type"]) + assert.Equal("msg", data["message"]) + assert.Equal("info", data["level"]) + + // substituted fields + assert.Equal("def", data["fields.message"]) + assert.Equal("ijk", data["fields.level"]) + assert.Equal("lmn", data["fields.type"]) + + // formats + assert.Equal(json.Number("1"), data["one"]) + assert.Equal(json.Number("3.14"), data["pi"]) + assert.Equal(true, data["bool"]) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 000000000..13f34cb6f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 
:= new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. 
+func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go new file mode 100644 index 000000000..b0502c335 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go @@ -0,0 +1,54 @@ +package airbrake + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/tobi/airbrake-go" +) + +// AirbrakeHook to send exceptions to an exception-tracking service compatible +// with the Airbrake API. +type airbrakeHook struct { + APIKey string + Endpoint string + Environment string +} + +func NewHook(endpoint, apiKey, env string) *airbrakeHook { + return &airbrakeHook{ + APIKey: apiKey, + Endpoint: endpoint, + Environment: env, + } +} + +func (hook *airbrakeHook) Fire(entry *logrus.Entry) error { + airbrake.ApiKey = hook.APIKey + airbrake.Endpoint = hook.Endpoint + airbrake.Environment = hook.Environment + + var notifyErr error + err, ok := entry.Data["error"].(error) + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) + } + + airErr := airbrake.Notify(notifyErr) + if airErr != nil { + return fmt.Errorf("Failed to send error to Airbrake: %s", airErr) + } + + return nil +} + +func (hook *airbrakeHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go 
b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go new file mode 100644 index 000000000..058a91e34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go @@ -0,0 +1,133 @@ +package airbrake + +import ( + "encoding/xml" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Sirupsen/logrus" +) + +type notice struct { + Error NoticeError `xml:"error"` +} +type NoticeError struct { + Class string `xml:"class"` + Message string `xml:"message"` +} + +type customErr struct { + msg string +} + +func (e *customErr) Error() string { + return e.msg +} + +const ( + testAPIKey = "abcxyz" + testEnv = "development" + expectedClass = "*airbrake.customErr" + expectedMsg = "foo" + unintendedMsg = "Airbrake will not see this string" +) + +var ( + noticeError = make(chan NoticeError, 1) +) + +// TestLogEntryMessageReceived checks if invoking Logrus' log.Error +// method causes an XML payload containing the log entry message is received +// by a HTTP server emulating an Airbrake-compatible endpoint. +func TestLogEntryMessageReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.Error(expectedMsg) + + select { + case received := <-noticeError: + if received.Message != expectedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +// TestLogEntryMessageReceived confirms that, when passing an error type using +// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the +// error message returned by the Error() method on the error interface +// rather than the logrus.Entry.Message string. 
+func TestLogEntryWithErrorReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": &customErr{expectedMsg}, + }).Error(unintendedMsg) + + select { + case received := <-noticeError: + if received.Message != expectedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + if received.Class != expectedClass { + t.Errorf("Unexpected error class: %s", received.Class) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a +// non-error type using logrus.Fields, a HTTP server emulating an Airbrake +// endpoint receives the logrus.Entry.Message string. +// +// Only error types are supported when setting the 'error' field using +// logrus.WithFields(). +func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": expectedMsg, + }).Error(unintendedMsg) + + select { + case received := <-noticeError: + if received.Message != unintendedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +func startAirbrakeServer(t *testing.T) *httptest.Server { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var notice notice + if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil { + t.Error(err) + } + r.Body.Close() + + noticeError <- notice.Error + })) + + return ts +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go new 
file mode 100644 index 000000000..d20a0f54a --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go @@ -0,0 +1,68 @@ +package logrus_bugsnag + +import ( + "errors" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type bugsnagHook struct{} + +// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before +// bugsnag.Configure. Bugsnag must be configured before the hook. +var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") + +// ErrBugsnagSendFailed indicates that the hook failed to submit an error to +// bugsnag. The error was successfully generated, but `bugsnag.Notify()` +// failed. +type ErrBugsnagSendFailed struct { + err error +} + +func (e ErrBugsnagSendFailed) Error() string { + return "failed to send error to Bugsnag: " + e.err.Error() +} + +// NewBugsnagHook initializes a logrus hook which sends exceptions to an +// exception-tracking service compatible with the Bugsnag API. Before using +// this hook, you must call bugsnag.Configure(). The returned object should be +// registered with a log via `AddHook()` +// +// Entries that trigger an Error, Fatal or Panic should now include an "error" +// field to send to Bugsnag. +func NewBugsnagHook() (*bugsnagHook, error) { + if bugsnag.Config.APIKey == "" { + return nil, ErrBugsnagUnconfigured + } + return &bugsnagHook{}, nil +} + +// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the +// "error" field (or the Message if the error isn't present) and sends it off. 
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { + var notifyErr error + err, ok := entry.Data["error"].(error) + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) + } + + bugsnagErr := bugsnag.Notify(notifyErr) + if bugsnagErr != nil { + return ErrBugsnagSendFailed{bugsnagErr} + } + + return nil +} + +// Levels enumerates the log levels on which the error should be forwarded to +// bugsnag: everything at or above the "Error" level. +func (hook *bugsnagHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go new file mode 100644 index 000000000..e9ea298d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go @@ -0,0 +1,64 @@ +package logrus_bugsnag + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type notice struct { + Events []struct { + Exceptions []struct { + Message string `json:"message"` + } `json:"exceptions"` + } `json:"events"` +} + +func TestNoticeReceived(t *testing.T) { + msg := make(chan string, 1) + expectedMsg := "foo" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var notice notice + data, _ := ioutil.ReadAll(r.Body) + if err := json.Unmarshal(data, ¬ice); err != nil { + t.Error(err) + } + _ = r.Body.Close() + + msg <- notice.Events[0].Exceptions[0].Message + })) + defer ts.Close() + + hook := &bugsnagHook{} + + bugsnag.Configure(bugsnag.Configuration{ + Endpoint: ts.URL, + ReleaseStage: "production", + APIKey: "12345678901234567890123456789012", + Synchronous: true, + }) + + log := logrus.New() + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ 
+ "error": errors.New(expectedMsg), + }).Error("Bugsnag will not see this string") + + select { + case received := <-msg: + if received != expectedMsg { + t.Errorf("Unexpected message received: %s", received) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Bugsnag API") + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md new file mode 100644 index 000000000..ae61e9229 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md @@ -0,0 +1,28 @@ +# Papertrail Hook for Logrus :walrus: + +[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). + +In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. + +## Usage + +You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. + +For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. 
+ +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/papertrail" +) + +func main() { + log := logrus.New() + hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go new file mode 100644 index 000000000..c0f10c1bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go @@ -0,0 +1,55 @@ +package logrus_papertrail + +import ( + "fmt" + "net" + "os" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + format = "Jan 2 15:04:05" +) + +// PapertrailHook to send logs to a logging service compatible with the Papertrail API. +type PapertrailHook struct { + Host string + Port int + AppName string + UDPConn net.Conn +} + +// NewPapertrailHook creates a hook to be added to an instance of logger. +func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) + return &PapertrailHook{host, port, appName, conn}, err +} + +// Fire is called when a log event is fired. +func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { + date := time.Now().Format(format) + msg, _ := entry.String() + payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg) + + bytesWritten, err := hook.UDPConn.Write([]byte(payload)) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) + return err + } + + return nil +} + +// Levels returns the available logging levels. 
+func (hook *PapertrailHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go new file mode 100644 index 000000000..96318d003 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go @@ -0,0 +1,26 @@ +package logrus_papertrail + +import ( + "fmt" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/stvp/go-udp-testing" +) + +func TestWritingToUDP(t *testing.T) { + port := 16661 + udp.SetAddr(fmt.Sprintf(":%d", port)) + + hook, err := NewPapertrailHook("localhost", port, "test") + if err != nil { + t.Errorf("Unable to connect to local UDP server.") + } + + log := logrus.New() + log.Hooks.Add(hook) + + udp.ShouldReceive(t, "foo", func() { + log.Info("foo") + }) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md new file mode 100644 index 000000000..31de6540a --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md @@ -0,0 +1,111 @@ +# Sentry Hook for Logrus :walrus: + +[Sentry](https://getsentry.com) provides both self-hosted and hosted +solutions for exception tracking. +Both client and server are +[open source](https://github.com/getsentry/sentry). + +## Usage + +Every sentry application defined on the server gets a different +[DSN](https://www.getsentry.com/docs/). In the example below replace +`YOUR_DSN` with the one created for your application. 
+ +```go +import ( + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/sentry" +) + +func main() { + log := logrus.New() + hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + }) + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags: + +```go +tags := map[string]string{ + "site": "example.com", +} +levels := []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, +} +hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels) + +``` + +If you wish to initialize a SentryHook with an already initialized raven client, you can use +the `NewWithClientSentryHook` constructor: + +```go +import ( + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/sentry" + "github.com/getsentry/raven-go" +) + +func main() { + log := logrus.New() + + client, err := raven.New(YOUR_DSN) + if err != nil { + log.Fatal(err) + } + + hook, err := logrus_sentry.NewWithClientSentryHook(client, []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + }) + + if err == nil { + log.Hooks.Add(hook) + } +} + +hook, err := NewWithClientSentryHook(client, []logrus.Level{ + logrus.ErrorLevel, +}) +``` + +## Special fields + +Some logrus fields have a special meaning in this hook, +these are `server_name`, `logger` and `http_request`. +When logs are sent to sentry these fields are treated differently. +- `server_name` (also known as hostname) is the name of the server which +is logging the event (hostname.example.com) +- `logger` is the part of the application which is logging the event. +In go this usually means setting it to the name of the package. +- `http_request` is the in-coming request(*http.Request). The detailed request data are sent to Sentry. 
+ +## Timeout + +`Timeout` is the time the sentry hook will wait for a response +from the sentry server. + +If this time elapses with no response from +the server an error will be returned. + +If `Timeout` is set to 0 the SentryHook will not wait for a reply +and will assume a correct delivery. + +The SentryHook has a default timeout of `100 milliseconds` when created +with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field: + +```go +hook, _ := logrus_sentry.NewSentryHook(...) +hook.Timeout = 20*time.Second +``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go new file mode 100644 index 000000000..cf88098a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go @@ -0,0 +1,137 @@ +package logrus_sentry + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/getsentry/raven-go" +) + +var ( + severityMap = map[logrus.Level]raven.Severity{ + logrus.DebugLevel: raven.DEBUG, + logrus.InfoLevel: raven.INFO, + logrus.WarnLevel: raven.WARNING, + logrus.ErrorLevel: raven.ERROR, + logrus.FatalLevel: raven.FATAL, + logrus.PanicLevel: raven.FATAL, + } +) + +func getAndDel(d logrus.Fields, key string) (string, bool) { + var ( + ok bool + v interface{} + val string + ) + if v, ok = d[key]; !ok { + return "", false + } + + if val, ok = v.(string); !ok { + return "", false + } + delete(d, key) + return val, true +} + +func getAndDelRequest(d logrus.Fields, key string) (*http.Request, bool) { + var ( + ok bool + v interface{} + req *http.Request + ) + if v, ok = d[key]; !ok { + return nil, false + } + if req, ok = v.(*http.Request); !ok || req == nil { + return nil, false + } + delete(d, key) + return req, true +} + +// SentryHook delivers logs to a sentry server. 
+type SentryHook struct { + // Timeout sets the time to wait for a delivery error from the sentry server. + // If this is set to zero the server will not wait for any response and will + // consider the message correctly sent + Timeout time.Duration + + client *raven.Client + levels []logrus.Level +} + +// NewSentryHook creates a hook to be added to an instance of logger +// and initializes the raven client. +// This method sets the timeout to 100 milliseconds. +func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { + client, err := raven.New(DSN) + if err != nil { + return nil, err + } + return &SentryHook{100 * time.Millisecond, client, levels}, nil +} + +// NewWithTagsSentryHook creates a hook with tags to be added to an instance +// of logger and initializes the raven client. This method sets the timeout to +// 100 milliseconds. +func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) { + client, err := raven.NewWithTags(DSN, tags) + if err != nil { + return nil, err + } + return &SentryHook{100 * time.Millisecond, client, levels}, nil +} + +// NewWithClientSentryHook creates a hook using an initialized raven client. +// This method sets the timeout to 100 milliseconds. 
+func NewWithClientSentryHook(client *raven.Client, levels []logrus.Level) (*SentryHook, error) { + return &SentryHook{100 * time.Millisecond, client, levels}, nil +} + +// Called when an event should be sent to sentry +// Special fields that sentry uses to give more information to the server +// are extracted from entry.Data (if they are found) +// These fields are: logger, server_name and http_request +func (hook *SentryHook) Fire(entry *logrus.Entry) error { + packet := &raven.Packet{ + Message: entry.Message, + Timestamp: raven.Timestamp(entry.Time), + Level: severityMap[entry.Level], + Platform: "go", + } + + d := entry.Data + + if logger, ok := getAndDel(d, "logger"); ok { + packet.Logger = logger + } + if serverName, ok := getAndDel(d, "server_name"); ok { + packet.ServerName = serverName + } + if req, ok := getAndDelRequest(d, "http_request"); ok { + packet.Interfaces = append(packet.Interfaces, raven.NewHttp(req)) + } + packet.Extra = map[string]interface{}(d) + + _, errCh := hook.client.Capture(packet, nil) + timeout := hook.Timeout + if timeout != 0 { + timeoutCh := time.After(timeout) + select { + case err := <-errCh: + return err + case <-timeoutCh: + return fmt.Errorf("no response from sentry server in %s", timeout) + } + } + return nil +} + +// Levels returns the available logging levels. 
+func (hook *SentryHook) Levels() []logrus.Level { + return hook.levels +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go new file mode 100644 index 000000000..4a97bc63e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go @@ -0,0 +1,154 @@ +package logrus_sentry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/getsentry/raven-go" +) + +const ( + message = "error message" + server_name = "testserver.internal" + logger_name = "test.logger" +) + +func getTestLogger() *logrus.Logger { + l := logrus.New() + l.Out = ioutil.Discard + return l +} + +func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) { + pch := make(chan *raven.Packet, 1) + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + d := json.NewDecoder(req.Body) + p := &raven.Packet{} + err := d.Decode(p) + if err != nil { + t.Fatal(err.Error()) + } + + pch <- p + })) + defer s.Close() + + fragments := strings.SplitN(s.URL, "://", 2) + dsn := fmt.Sprintf( + "%s://public:secret@%s/sentry/project-id", + fragments[0], + fragments[1], + ) + tf(dsn, pch) +} + +func TestSpecialFields(t *testing.T) { + WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { + logger := getTestLogger() + + hook, err := NewSentryHook(dsn, []logrus.Level{ + logrus.ErrorLevel, + }) + + if err != nil { + t.Fatal(err.Error()) + } + logger.Hooks.Add(hook) + + req, _ := http.NewRequest("GET", "url", nil) + logger.WithFields(logrus.Fields{ + "server_name": server_name, + "logger": logger_name, + "http_request": req, + }).Error(message) + + packet := <-pch + if packet.Logger != logger_name { + t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger) + } + + if 
packet.ServerName != server_name { + t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName) + } + }) +} + +func TestSentryHandler(t *testing.T) { + WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { + logger := getTestLogger() + hook, err := NewSentryHook(dsn, []logrus.Level{ + logrus.ErrorLevel, + }) + if err != nil { + t.Fatal(err.Error()) + } + logger.Hooks.Add(hook) + + logger.Error(message) + packet := <-pch + if packet.Message != message { + t.Errorf("message should have been %s, was %s", message, packet.Message) + } + }) +} + +func TestSentryWithClient(t *testing.T) { + WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { + logger := getTestLogger() + + client, _ := raven.New(dsn) + + hook, err := NewWithClientSentryHook(client, []logrus.Level{ + logrus.ErrorLevel, + }) + if err != nil { + t.Fatal(err.Error()) + } + logger.Hooks.Add(hook) + + logger.Error(message) + packet := <-pch + if packet.Message != message { + t.Errorf("message should have been %s, was %s", message, packet.Message) + } + }) +} + +func TestSentryTags(t *testing.T) { + WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { + logger := getTestLogger() + tags := map[string]string{ + "site": "test", + } + levels := []logrus.Level{ + logrus.ErrorLevel, + } + + hook, err := NewWithTagsSentryHook(dsn, tags, levels) + if err != nil { + t.Fatal(err.Error()) + } + + logger.Hooks.Add(hook) + + logger.Error(message) + packet := <-pch + expected := raven.Tags{ + raven.Tag{ + Key: "site", + Value: "test", + }, + } + if !reflect.DeepEqual(packet.Tags, expected) { + t.Errorf("message should have been %s, was %s", message, packet.Message) + } + }) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 000000000..4dbb8e729 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 
+1,20 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 000000000..b6fa37462 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,59 @@ +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return []logrus.Level{ + 
logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 000000000..42762dc10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/Sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..2ad6dc5cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
+ TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000..1d7087325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable 
to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git 
a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..dd9975931 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,206 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. 
+func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Ff you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) 
+ } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) 
+ } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..43ee12e90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,94 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. 
+ // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var _ StdLogger = &log.Logger{} + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000..efaacea23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,301 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields 
:= make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + 
log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, 
fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = 
ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..71f8d67a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..a2c0b40db --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..4bb537602 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..2e09f6f7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..17cc29848 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,159 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, "msg", entry.Message) + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return true +} + +func (f 
*TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + + switch value := value.(type) { + case string: + if needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", value) + } + default: + fmt.Fprint(b, value) + } + + b.WriteByte(' ') +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000..e25a44f67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && 
timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..1e30b1c75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore b/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git 
a/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE new file mode 100644 index 000000000..106569e54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/README.md b/Godeps/_workspace/src/github.com/armon/go-metrics/README.md new file mode 100644 index 000000000..7b6f23e29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/README.md @@ -0,0 +1,71 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. 
+ +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +===== + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
+ +Examples +======== + +Here is an example of using the package: + + func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) + } + + // Configure a statsite sink as the global metrics sink + sink, _ := metrics.NewStatsiteSink("statsite:8125") + metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + + // Emit a Key/Value pair + metrics.EmitKey([]string{"questions", "meaning of life"}, 42) + + +Here is an example of setting up an signal handler: + + // Setup the inmem sink and signal handler + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + sig := metrics.DefaultInmemSignal(inm) + metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + + // Run some code + inm.SetGauge([]string{"foo"}, 42) + inm.EmitKey([]string{"bar"}, 30) + + inm.IncrCounter([]string{"baz"}, 42) + inm.IncrCounter([]string{"baz"}, 1) + inm.IncrCounter([]string{"baz"}, 80) + + inm.AddSample([]string{"method", "wow"}, 42) + inm.AddSample([]string{"method", "wow"}, 100) + inm.AddSample([]string{"method", "wow"}, 22) + + .... 
+ +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 + diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 000000000..31098dd57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 000000000..38136af3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go new file mode 100644 index 000000000..da5032960 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,241 @@ +package metrics + +import ( + "fmt" + "math" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. 
+type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. + maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]float32 + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]*AggregateSample + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]*AggregateSample +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]float32), + Points: make(map[string][]float32), + Counters: make(map[string]*AggregateSample), + Samples: make(map[string]*AggregateSample), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Sum float64 // The sum of values + SumSq float64 // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the 
values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. +func NewInmemSink(interval, retain time.Duration) *InmemSink { + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = val +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := intv.Counters[k] + if agg == nil { + agg = &AggregateSample{} + intv.Counters[k] = agg + } + agg.Ingest(float64(val)) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := 
intv.Samples[k] + if agg == nil { + agg = &AggregateSample{} + intv.Samples[k] = agg + } + agg.Ingest(float64(val)) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + intervals := make([]*IntervalMetrics, len(i.intervals)) + copy(intervals, i.intervals) + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Replace(joined, " ", "_", -1) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go new file mode 
100644 index 000000000..95d08ee10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. 
Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for i := 0; i < len(data)-1; i++ { + intv := data[i] + intv.RLock() + for name, val := range intv.Gauges { + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for name, agg := range intv.Counters { + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) + } + for name, agg := range intv.Samples { + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go new file mode 100644 index 000000000..9bbca5f25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go @@ -0,0 +1,46 @@ +package metrics + +import ( + "bytes" + "os" + "strings" + "syscall" + "testing" + "time" +) + +func TestInmemSignal(t *testing.T) { + buf := bytes.NewBuffer(nil) + inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) + sig := NewInmemSignal(inm, syscall.SIGUSR1, buf) + defer sig.Stop() + + 
inm.SetGauge([]string{"foo"}, 42) + inm.EmitKey([]string{"bar"}, 42) + inm.IncrCounter([]string{"baz"}, 42) + inm.AddSample([]string{"wow"}, 42) + + // Wait for period to end + time.Sleep(15 * time.Millisecond) + + // Send signal! + syscall.Kill(os.Getpid(), syscall.SIGUSR1) + + // Wait for flush + time.Sleep(10 * time.Millisecond) + + // Check the output + out := string(buf.Bytes()) + if !strings.Contains(out, "[G] 'foo': 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[P] 'bar': 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[C] 'baz': Count: 1 Sum: 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") { + t.Fatalf("bad: %v", out) + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go new file mode 100644 index 000000000..228a2fc1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go @@ -0,0 +1,104 @@ +package metrics + +import ( + "math" + "testing" + "time" +) + +func TestInmemSink(t *testing.T) { + inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) + + data := inm.Data() + if len(data) != 1 { + t.Fatalf("bad: %v", data) + } + + // Add data points + inm.SetGauge([]string{"foo", "bar"}, 42) + inm.EmitKey([]string{"foo", "bar"}, 42) + inm.IncrCounter([]string{"foo", "bar"}, 20) + inm.IncrCounter([]string{"foo", "bar"}, 22) + inm.AddSample([]string{"foo", "bar"}, 20) + inm.AddSample([]string{"foo", "bar"}, 22) + + data = inm.Data() + if len(data) != 1 { + t.Fatalf("bad: %v", data) + } + + intvM := data[0] + intvM.RLock() + + if time.Now().Sub(intvM.Interval) > 10*time.Millisecond { + t.Fatalf("interval too old") + } + if intvM.Gauges["foo.bar"] != 42 { + t.Fatalf("bad val: %v", intvM.Gauges) + } + if intvM.Points["foo.bar"][0] != 42 { + t.Fatalf("bad val: %v", intvM.Points) + } + + agg := intvM.Counters["foo.bar"] + if agg.Count != 2 { + 
t.Fatalf("bad val: %v", agg) + } + if agg.Sum != 42 { + t.Fatalf("bad val: %v", agg) + } + if agg.SumSq != 884 { + t.Fatalf("bad val: %v", agg) + } + if agg.Min != 20 { + t.Fatalf("bad val: %v", agg) + } + if agg.Max != 22 { + t.Fatalf("bad val: %v", agg) + } + if agg.Mean() != 21 { + t.Fatalf("bad val: %v", agg) + } + if agg.Stddev() != math.Sqrt(2) { + t.Fatalf("bad val: %v", agg) + } + + if agg.LastUpdated.IsZero() { + t.Fatalf("agg.LastUpdated is not set: %v", agg) + } + + diff := time.Now().Sub(agg.LastUpdated).Seconds() + if diff > 1 { + t.Fatalf("time diff too great: %f", diff) + } + + if agg = intvM.Samples["foo.bar"]; agg == nil { + t.Fatalf("missing sample") + } + + intvM.RUnlock() + + for i := 1; i < 10; i++ { + time.Sleep(10 * time.Millisecond) + inm.SetGauge([]string{"foo", "bar"}, 42) + data = inm.Data() + if len(data) != min(i+1, 5) { + t.Fatalf("bad: %v", data) + } + } + + // Should not exceed 5 intervals! + time.Sleep(10 * time.Millisecond) + inm.SetGauge([]string{"foo", "bar"}, 42) + data = inm.Data() + if len(data) != 5 { + t.Fatalf("bad: %v", data) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go new file mode 100644 index 000000000..b818e4182 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "runtime" + "time" +) + +func (m *Metrics) SetGauge(key []string, val float32) { + if m.HostName != "" && m.EnableHostname { + key = insert(0, m.HostName, key) + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.SetGauge(key, val) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + 
m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.IncrCounter(key, val) +} + +func (m *Metrics) AddSample(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.AddSample(key, val) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSample(key, msec) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statsitics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 
256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go new file mode 100644 index 000000000..c7baf22bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go @@ -0,0 +1,262 @@ +package metrics + +import ( + "reflect" + "runtime" + "testing" + "time" +) + +func mockMetric() (*MockSink, *Metrics) { + m := &MockSink{} + met := &Metrics{sink: m} + return m, met +} + +func TestMetrics_SetGauge(t *testing.T) { + m, met := mockMetric() + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.HostName = "test" + met.EnableHostname = true + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "test" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_EmitKey(t *testing.T) { + m, met := mockMetric() + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + 
met.EnableTypePrefix = true + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "kv" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_IncrCounter(t *testing.T) { + m, met := mockMetric() + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "counter" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_AddSample(t *testing.T) { + m, met := mockMetric() + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "sample" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_MeasureSince(t *testing.T) { + m, met := mockMetric() + met.TimerGranularity = time.Millisecond + n := time.Now() + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } + + m, met = mockMetric() + 
met.TimerGranularity = time.Millisecond + met.EnableTypePrefix = true + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "timer" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.TimerGranularity = time.Millisecond + met.ServiceName = "service" + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } +} + +func TestMetrics_EmitRuntimeStats(t *testing.T) { + runtime.GC() + m, met := mockMetric() + met.emitRuntimeStats() + + if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[0] <= 1 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[1] <= 40000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[2] <= 100000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[3] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[4] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[5] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[6] <= 100000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[7] <= 1 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[8][0] != "runtime" || 
m.keys[8][1] != "gc_pause_ns" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[8] <= 1000 { + t.Fatalf("bad val: %v", m.vals) + } +} + +func TestInsert(t *testing.T) { + k := []string{"hi", "bob"} + exp := []string{"hi", "there", "bob"} + out := insert(1, "there", k) + if !reflect.DeepEqual(exp, out) { + t.Fatalf("bad insert %v %v", exp, out) + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go b/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go new file mode 100644 index 000000000..362dbfb62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go @@ -0,0 +1,88 @@ +// +build go1.3 +package prometheus + +import ( + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type PrometheusSink struct { + mu sync.Mutex + gauges map[string]prometheus.Gauge + summaries map[string]prometheus.Summary + counters map[string]prometheus.Counter +} + +func NewPrometheusSink() (*PrometheusSink, error) { + return &PrometheusSink{ + gauges: make(map[string]prometheus.Gauge), + summaries: make(map[string]prometheus.Summary), + counters: make(map[string]prometheus.Counter), + }, nil +} + +func (p *PrometheusSink) flattenKey(parts []string) string { + joined := strings.Join(parts, "_") + joined = strings.Replace(joined, " ", "_", -1) + joined = strings.Replace(joined, ".", "_", -1) + joined = strings.Replace(joined, "-", "_", -1) + return joined +} + +func (p *PrometheusSink) SetGauge(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.gauges[key] + if !ok { + g = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: key, + Help: key, + }) + prometheus.MustRegister(g) + p.gauges[key] = g + } + g.Set(float64(val)) +} + +func (p *PrometheusSink) AddSample(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.summaries[key] + if !ok { + g = 
prometheus.NewSummary(prometheus.SummaryOpts{ + Name: key, + Help: key, + MaxAge: 10 * time.Second, + }) + prometheus.MustRegister(g) + p.summaries[key] = g + } + g.Observe(float64(val)) +} + +// EmitKey is not implemented. Prometheus doesn’t offer a type for which an +// arbitrary number of values is retained, as Prometheus works with a pull +// model, rather than a push model. +func (p *PrometheusSink) EmitKey(key []string, val float32) { +} + +func (p *PrometheusSink) IncrCounter(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.counters[key] + if !ok { + g = prometheus.NewCounter(prometheus.CounterOpts{ + Name: key, + Help: key, + }) + prometheus.MustRegister(g) + p.counters[key] = g + } + g.Add(float64(val)) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go new file mode 100644 index 000000000..0c240c2c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go @@ -0,0 +1,52 @@ +package metrics + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} + +// FanoutSink is used to sink to fanout values to multiple sinks +type FanoutSink 
[]MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + for _, s := range fh { + s.SetGauge(key, val) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + for _, s := range fh { + s.IncrCounter(key, val) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + for _, s := range fh { + s.AddSample(key, val) + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go new file mode 100644 index 000000000..15c5d771a --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go @@ -0,0 +1,120 @@ +package metrics + +import ( + "reflect" + "testing" +) + +type MockSink struct { + keys [][]string + vals []float32 +} + +func (m *MockSink) SetGauge(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) EmitKey(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) IncrCounter(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) AddSample(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} + +func TestFanoutSink_Gauge(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.SetGauge(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Key(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := 
[]string{"test"} + v := float32(42.0) + fh.EmitKey(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Counter(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.IncrCounter(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Sample(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.AddSample(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start.go new file mode 100644 index 000000000..44113f100 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/start.go @@ -0,0 +1,95 @@ +package metrics + +import ( + "os" + "time" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to seperate services + HostName string // Hostname to use. 
If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink +} + +// Shared global metrics instance +var globalMetrics *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics = &Metrics{sink: &BlackholeSink{}} +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. 
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics = metrics + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.SetGauge(key, val) +} + +func EmitKey(key []string, val float32) { + globalMetrics.EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.IncrCounter(key, val) +} + +func AddSample(key []string, val float32) { + globalMetrics.AddSample(key, val) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.MeasureSince(key, start) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go new file mode 100644 index 000000000..8b3210c15 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "reflect" + "testing" + "time" +) + +func TestDefaultConfig(t *testing.T) { + conf := DefaultConfig("service") + if conf.ServiceName != "service" { + t.Fatalf("Bad name") + } + if conf.HostName == "" { + t.Fatalf("missing hostname") + } + if !conf.EnableHostname || !conf.EnableRuntimeMetrics { + t.Fatalf("expect true") + } + if conf.EnableTypePrefix { + t.Fatalf("expect false") + } + if conf.TimerGranularity != time.Millisecond { + t.Fatalf("bad granularity") + } + if conf.ProfileInterval != time.Second { + t.Fatalf("bad interval") + } +} + +func Test_GlobalMetrics_SetGauge(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + SetGauge(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_EmitKey(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := 
float32(42.0) + EmitKey(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_IncrCounter(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + IncrCounter(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_AddSample(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + AddSample(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_MeasureSince(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + globalMetrics.TimerGranularity = time.Millisecond + + k := []string{"test"} + now := time.Now() + MeasureSince(k, now) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if m.vals[0] > 0.1 { + t.Fatalf("val too large %v", m.vals[0]) + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go new file mode 100644 index 000000000..65a5021a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,154 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. 
+type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! 
Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go new file mode 100644 index 000000000..622eb5d3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go @@ -0,0 +1,105 @@ +package metrics + +import ( + "bufio" + "bytes" + "net" + "testing" + "time" +) + +func TestStatsd_Flatten(t *testing.T) { + s := &StatsdSink{} + flat := s.flattenKey([]string{"a", "b", "c", "d"}) + if flat != "a.b.c.d" { + t.Fatalf("Bad flat") + } +} + +func TestStatsd_PushFullQueue(t *testing.T) { + q := make(chan string, 1) + q <- "full" + + s := &StatsdSink{metricQueue: q} + s.pushMetric("omit") + + out := <-q + if out != "full" { + t.Fatalf("bad val %v", out) + } + + select { + case v := <-q: + t.Fatalf("bad val %v", v) + default: + } +} + +func TestStatsd_Conn(t *testing.T) { + addr := "127.0.0.1:7524" + done := make(chan bool) + go func() { + list, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 7524}) + if err != nil { 
+ panic(err) + } + defer list.Close() + buf := make([]byte, 1500) + n, err := list.Read(buf) + if err != nil { + panic(err) + } + buf = buf[:n] + reader := bufio.NewReader(bytes.NewReader(buf)) + + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "gauge.val:1.000000|g\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "key.other:2.000000|kv\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "counter.me:3.000000|c\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "sample.slow_thingy:4.000000|ms\n" { + t.Fatalf("bad line %s", line) + } + + done <- true + }() + s, err := NewStatsdSink(addr) + if err != nil { + t.Fatalf("bad error") + } + + s.SetGauge([]string{"gauge", "val"}, float32(1)) + s.EmitKey([]string{"key", "other"}, float32(2)) + s.IncrCounter([]string{"counter", "me"}, float32(3)) + s.AddSample([]string{"sample", "slow thingy"}, float32(4)) + + select { + case <-done: + s.Shutdown() + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go new file mode 100644 index 000000000..68730139a --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,142 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. 
+ flushInterval = 100 * time.Millisecond +) + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! 
Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go new file mode 100644 index 000000000..d9c744f41 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go @@ -0,0 +1,101 @@ +package metrics + +import ( + "bufio" + "net" + "testing" + "time" +) + +func acceptConn(addr string) net.Conn { + ln, _ := net.Listen("tcp", addr) + conn, _ := ln.Accept() + return conn +} + +func TestStatsite_Flatten(t *testing.T) { + s := &StatsiteSink{} + flat := s.flattenKey([]string{"a", "b", "c", "d"}) + if flat != "a.b.c.d" { + t.Fatalf("Bad flat") + } +} + +func TestStatsite_PushFullQueue(t *testing.T) { + q := make(chan string, 1) + q <- "full" + + s := &StatsiteSink{metricQueue: q} + s.pushMetric("omit") + + out := <-q + if out != "full" { + t.Fatalf("bad val %v", out) + } + + select { + case v := <-q: + t.Fatalf("bad val %v", v) + default: + } +} + +func TestStatsite_Conn(t *testing.T) { + addr := "localhost:7523" + done := make(chan bool) + go func() { + conn := acceptConn(addr) + reader := bufio.NewReader(conn) + + line, err := reader.ReadString('\n') + if 
err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "gauge.val:1.000000|g\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "key.other:2.000000|kv\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "counter.me:3.000000|c\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "sample.slow_thingy:4.000000|ms\n" { + t.Fatalf("bad line %s", line) + } + + conn.Close() + done <- true + }() + s, err := NewStatsiteSink(addr) + if err != nil { + t.Fatalf("bad error") + } + + s.SetGauge([]string{"gauge", "val"}, float32(1)) + s.EmitKey([]string{"key", "other"}, float32(2)) + s.IncrCounter([]string{"counter", "me"}, float32(3)) + s.AddSample([]string{"sample", "slow thingy"}, float32(4)) + + select { + case <-done: + s.Shutdown() + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore b/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore new file mode 100644 index 000000000..c7bd2b7a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE b/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE new file mode 100644 index 000000000..004e77fe5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile new file mode 100644 index 000000000..cfbed514b --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile @@ -0,0 +1,54 @@ +TEST=. +BENCH=. +COVERPROFILE=/tmp/c.out +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +bench: + go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) + +# http://cloc.sourceforge.net/ +cloc: + @cloc --not-match-f='Makefile|_test.go' . + +cover: fmt + go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . + go tool cover -html=$(COVERPROFILE) + rm $(COVERPROFILE) + +cpuprofile: fmt + @go test -c + @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof + +# go get github.com/kisielk/errcheck +errcheck: + @echo "=== errcheck ===" + @errcheck github.com/boltdb/bolt + +fmt: + @go fmt ./... + +get: + @go get -d ./... 
+ +build: get + @mkdir -p bin + @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt + +test: fmt + @go get github.com/stretchr/testify/assert + @echo "=== TESTS ===" + @go test -v -cover -test.run=$(TEST) + @echo "" + @echo "" + @echo "=== CLI ===" + @go test -v -test.run=$(TEST) ./cmd/bolt + @echo "" + @echo "" + @echo "=== RACE DETECTOR ===" + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +.PHONY: bench cloc cover cpuprofile fmt memprofile test diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md new file mode 100644 index 000000000..00fad6afb --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md @@ -0,0 +1,621 @@ +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and +the [LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + + +## Project Status + +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. 
Many companies such as Shopify and Heroku use Bolt-backed +services every day. + + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. 
+ +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. 
The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Println("Allocated ID %d", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `Tx.Begin()` function directly but _please_ be sure to close the +transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err := tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. 
It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different than the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. 
To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +When you have iterated to the end of the cursor then `Next()` will return `nil`. +You must seek to a position using `First()`, `Last()`, or `Seek()` before +calling `Next()` or `Prev()`. If you do not seek to a position then these +functions will return `nil`. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. 
+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. It will also use `O_DIRECT` when available +to prevent page cache trashing. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. 
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). 
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. 
+It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can add a write-ahead log or + [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt + to mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. 
Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. 
However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. 
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
+* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+
+If you are using Bolt in a project please send a pull request to add it to the list. 
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go new file mode 100644 index 000000000..84acae6bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go @@ -0,0 +1,138 @@ +package bolt + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. 
+func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. 
+var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go new file mode 100644 index 000000000..b745a371f --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go @@ -0,0 +1,170 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "hash/fnv" + "sync" + "testing" + + "github.com/boltdb/bolt" +) + +func validateBatchBench(b *testing.B, db *TestDB) { + var rollback = errors.New("sentinel error to cause rollback") + validate := func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte("bench")) + h := fnv.New32a() + buf := make([]byte, 4) + for id := uint32(0); id < 1000; id++ { + binary.LittleEndian.PutUint32(buf, id) + h.Reset() + h.Write(buf[:]) + k := h.Sum(nil) + v := bucket.Get(k) + if v == nil { + b.Errorf("not found id=%d key=%x", id, k) + continue + } + if g, e := v, []byte("filler"); !bytes.Equal(g, e) { + b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) + } + if err := bucket.Delete(k); err != nil { + return err + } + } + // should be empty now + c := bucket.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + b.Errorf("unexpected key: %x = %q", k, v) + } + return rollback + } + if err := db.Update(validate); err != nil && err != rollback { + b.Error(err) + } +} + +func BenchmarkDBBatchAutomatic(b *testing.B) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("bench")) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := 
make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Batch(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchSingle(b *testing.B) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("bench")) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Update(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchManual10x100(b *testing.B) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("bench")) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for major := 0; major < 10; major++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + insert100 := func(tx *bolt.Tx) error { + h := fnv.New32a() + buf := make([]byte, 4) + for minor := uint32(0); minor < 100; minor++ { + binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) + h.Reset() + h.Write(buf[:]) + k := h.Sum(nil) + b := tx.Bucket([]byte("bench")) + if err := b.Put(k, []byte("filler")); err != nil { + return err + } + } 
+ return nil + } + if err := db.Update(insert100); err != nil { + b.Fatal(err) + } + }(uint32(major)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go new file mode 100644 index 000000000..74eff8af9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go @@ -0,0 +1,148 @@ +package bolt_test + +import ( + "encoding/binary" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "net/http/httptest" + "os" + + "github.com/boltdb/bolt" +) + +// Set this to see how the counts are actually updated. +const verbose = false + +// Counter updates a counter in Bolt for every URL path requested. +type counter struct { + db *bolt.DB +} + +func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Communicates the new count from a successful database + // transaction. + var result uint64 + + increment := func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("hits")) + if err != nil { + return err + } + key := []byte(req.URL.String()) + // Decode handles key not found for us. + count := decode(b.Get(key)) + 1 + b.Put(key, encode(count)) + // All good, communicate new count. + result = count + return nil + } + if err := c.db.Batch(increment); err != nil { + http.Error(rw, err.Error(), 500) + return + } + + if verbose { + log.Printf("server: %s: %d", req.URL.String(), result) + } + + rw.Header().Set("Content-Type", "application/octet-stream") + fmt.Fprintf(rw, "%d\n", result) +} + +func client(id int, base string, paths []string) error { + // Process paths in random order. 
+ rng := rand.New(rand.NewSource(int64(id))) + permutation := rng.Perm(len(paths)) + + for i := range paths { + path := paths[permutation[i]] + resp, err := http.Get(base + path) + if err != nil { + return err + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if verbose { + log.Printf("client: %s: %s", path, buf) + } + } + return nil +} + +func ExampleDB_Batch() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Start our web server + count := counter{db} + srv := httptest.NewServer(count) + defer srv.Close() + + // Decrease the batch size to make things more interesting. + db.MaxBatchSize = 3 + + // Get every path multiple times concurrently. + const clients = 10 + paths := []string{ + "/foo", + "/bar", + "/baz", + "/quux", + "/thud", + "/xyzzy", + } + errors := make(chan error, clients) + for i := 0; i < clients; i++ { + go func(id int) { + errors <- client(id, srv.URL, paths) + }(i) + } + // Check all responses to make sure there's no error. + for i := 0; i < clients; i++ { + if err := <-errors; err != nil { + fmt.Printf("client error: %v", err) + return + } + } + + // Check the final result + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("hits")) + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("hits to %s: %d\n", k, decode(v)) + } + return nil + }) + + // Output: + // hits to /bar: 10 + // hits to /baz: 10 + // hits to /foo: 10 + // hits to /quux: 10 + // hits to /thud: 10 + // hits to /xyzzy: 10 +} + +// encode marshals a counter. +func encode(n uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, n) + return buf +} + +// decode unmarshals a counter. Nil buffers are decoded as 0. 
+func decode(buf []byte) uint64 { + if buf == nil { + return 0 + } + return binary.BigEndian.Uint64(buf) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go new file mode 100644 index 000000000..0b5075fdd --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go @@ -0,0 +1,167 @@ +package bolt_test + +import ( + "testing" + "time" + + "github.com/boltdb/bolt" +) + +// Ensure two functions can perform updates in a single batch. +func TestDB_Batch(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("widgets")) + + // Iterate over multiple updates in separate goroutines. + n := 2 + ch := make(chan error) + for i := 0; i < n; i++ { + go func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + }(i) + } + + // Check all responses to make sure there's no error. + for i := 0; i < n; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + db.MustView(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < n; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }) +} + +func TestDB_Batch_Panic(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var sentinel int + var bork = &sentinel + var problem interface{} + var err error + + // Execute a function inside a batch that panics. + func() { + defer func() { + if p := recover(); p != nil { + problem = p + } + }() + err = db.Batch(func(tx *bolt.Tx) error { + panic(bork) + }) + }() + + // Verify there is no error. + if g, e := err, error(nil); g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } + // Verify the panic was captured. 
+ if g, e := problem, bork; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} + +func TestDB_BatchFull(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("widgets")) + + const size = 3 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = size + // high enough to never trigger here + db.MaxBatchDelay = 1 * time.Hour + + go put(1) + go put(2) + + // Give the batch a chance to exhibit bugs. + time.Sleep(10 * time.Millisecond) + + // not triggered yet + select { + case <-ch: + t.Fatalf("batch triggered too early") + default: + } + + go put(3) + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + db.MustView(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }) +} + +func TestDB_BatchTime(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.MustCreateBucket([]byte("widgets")) + + const size = 1 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = 1000 + db.MaxBatchDelay = 0 + + go put(1) + + // Batch must trigger by time alone. + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. 
+ db.MustView(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 000000000..e659bfb91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 000000000..cca6b7eb7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 000000000..e659bfb91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 000000000..e9d1c907b --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,12 @@ +package bolt + +import ( + "syscall" +) + +var odirect = syscall.O_DIRECT + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 000000000..7c1bef1a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,29 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +var odirect int + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go new file mode 100644 index 000000000..b7bea1fc5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go @@ -0,0 +1,36 @@ +package bolt_test + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +// assert fails the test if the condition is false. 
+func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 000000000..17ca318bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,100 @@ +// +build !windows,!plan9 + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. 
+ time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(f *os.File) error { + return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. 
+func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 000000000..8b782be5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,76 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +var odirect int + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(f *os.File, _ bool, _ time.Duration) error { + return nil +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(f *os.File) error { + return nil +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. 
+ db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 000000000..8db89776f --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,10 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +var odirect int + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go new file mode 100644 index 000000000..676699210 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,743 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = 4294967295 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. 
+type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. 
+ return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. 
+ if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue + } + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. 
+ child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. 
+ key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. 
+func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. 
+ used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. 
+ for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. 
+ var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. 
+func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. 
+ BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go new file mode 100644 index 000000000..62b8c5878 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go @@ -0,0 +1,1169 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math/rand" + "os" + "strconv" + "strings" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a bucket that gets a non-existent key returns nil. 
+func TestBucket_Get_NonExistent(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + assert(t, value == nil, "") + return nil + }) +} + +// Ensure that a bucket can read a value that is not flushed yet. +func TestBucket_Get_FromNode(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("foo"), []byte("bar")) + value := b.Get([]byte("foo")) + equals(t, []byte("bar"), value) + return nil + }) +} + +// Ensure that a bucket retrieved via Get() returns a nil. +func TestBucket_Get_IncompatibleValue(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + return nil + }) +} + +// Ensure that a bucket can write a key/value. +func TestBucket_Put(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + ok(t, err) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + equals(t, value, []byte("bar")) + return nil + }) +} + +// Ensure that a bucket can rewrite a key in the same transaction. +func TestBucket_Put_Repeat(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + ok(t, b.Put([]byte("foo"), []byte("bar"))) + ok(t, b.Put([]byte("foo"), []byte("baz"))) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + equals(t, value, []byte("baz")) + return nil + }) +} + +// Ensure that a bucket can write a bunch of large values. 
+func TestBucket_Put_Large(t *testing.T) { + db := NewTestDB() + defer db.Close() + + count, factor := 100, 200 + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) + } + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + value := b.Get([]byte(strings.Repeat("0", i*factor))) + equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value) + } + return nil + }) +} + +// Ensure that a database can perform multiple large appends safely. +func TestDB_Put_VeryLarge(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + n, batchN := 400000, 200000 + ksize, vsize := 8, 500 + + db := NewTestDB() + defer db.Close() + + for i := 0; i < n; i += batchN { + err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + for j := 0; j < batchN; j++ { + k, v := make([]byte, ksize), make([]byte, vsize) + binary.BigEndian.PutUint32(k, uint32(i+j)) + ok(t, b.Put(k, v)) + } + return nil + }) + ok(t, err) + } +} + +// Ensure that a setting a value on a key with a bucket value returns an error. +func TestBucket_Put_IncompatibleValue(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + return nil + }) +} + +// Ensure that a setting a value while the transaction is closed returns an error. 
+func TestBucket_Put_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) +} + +// Ensure that setting a value on a read-only bucket returns an error. +func TestBucket_Put_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + err := b.Put([]byte("foo"), []byte("bar")) + equals(t, err, bolt.ErrTxNotWritable) + return nil + }) +} + +// Ensure that a bucket can delete an existing key. +func TestBucket_Delete(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) + ok(t, err) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + assert(t, value == nil, "") + return nil + }) +} + +// Ensure that deleting a large set of keys will work correctly. +func TestBucket_Delete_Large(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + var b, _ = tx.CreateBucket([]byte("widgets")) + for i := 0; i < 100; i++ { + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) + } + return nil + }) + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + ok(t, b.Delete([]byte(strconv.Itoa(i)))) + } + return nil + }) + db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") + } + return nil + }) +} + +// Deleting a very large list of keys will cause the freelist to use overflow. 
+func TestBucket_Delete_FreelistOverflow(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := NewTestDB() + defer db.Close() + k := make([]byte, 16) + for i := uint64(0); i < 10000; i++ { + err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("0")) + if err != nil { + t.Fatalf("bucket error: %s", err) + } + + for j := uint64(0); j < 1000; j++ { + binary.BigEndian.PutUint64(k[:8], i) + binary.BigEndian.PutUint64(k[8:], j) + if err := b.Put(k, nil); err != nil { + t.Fatalf("put error: %s", err) + } + } + + return nil + }) + + if err != nil { + t.Fatalf("update error: %s", err) + } + } + + // Delete all of them in one large transaction + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("0")) + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + b.Delete(k) + } + return nil + }) + + // Check that a freelist overflow occurred. + ok(t, err) +} + +// Ensure that accessing and updating nested buckets is ok across transactions. +func TestBucket_Nested(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + // Create a widgets bucket. + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + + // Create a widgets/foo bucket. + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) + + // Create a widgets/bar key. + ok(t, b.Put([]byte("bar"), []byte("0000"))) + + return nil + }) + db.MustCheck() + + // Update widgets/bar. + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + ok(t, b.Put([]byte("bar"), []byte("xxxx"))) + return nil + }) + db.MustCheck() + + // Cause a split. + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 10000; i++ { + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) + } + return nil + }) + db.MustCheck() + + // Insert into widgets/foo/baz. 
+ db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) + return nil + }) + db.MustCheck() + + // Verify. + db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) + equals(t, []byte("xxxx"), b.Get([]byte("bar"))) + for i := 0; i < 10000; i++ { + equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) + } + return nil + }) +} + +// Ensure that deleting a bucket using Delete() returns an error. +func TestBucket_Delete_Bucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + _, err := b.CreateBucket([]byte("foo")) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) + return nil + }) +} + +// Ensure that deleting a key on a read-only bucket returns an error. +func TestBucket_Delete_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + err := b.Delete([]byte("foo")) + equals(t, err, bolt.ErrTxNotWritable) + return nil + }) +} + +// Ensure that a deleting value while the transaction is closed returns an error. +func TestBucket_Delete_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) +} + +// Ensure that deleting a bucket causes nested buckets to be deleted. 
+func TestBucket_DeleteBucket_Nested(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + return nil + }) +} + +// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. +func TestBucket_DeleteBucket_Nested2(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + return nil + }) + db.Update(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) + ok(t, tx.DeleteBucket([]byte("widgets"))) + return nil + }) + db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) == nil, "") + return nil + }) +} + +// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. 
+func TestBucket_DeleteBucket_Large(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) + for i := 0; i < 1000; i++ { + ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) + } + return nil + }) + db.Update(func(tx *bolt.Tx) error { + ok(t, tx.DeleteBucket([]byte("widgets"))) + return nil + }) + + // NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly. +} + +// Ensure that a simple value retrieved via Bucket() returns a nil. +func TestBucket_Bucket_IncompatibleValue(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") + return nil + }) +} + +// Ensure that creating a bucket on an existing non-bucket key returns an error. +func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + equals(t, bolt.ErrIncompatibleValue, err) + return nil + }) +} + +// Ensure that deleting a bucket on an existing non-bucket key returns an error. 
+func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + return nil + }) +} + +// Ensure that a bucket can return an autoincrementing sequence. +func TestBucket_NextSequence(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.CreateBucket([]byte("woojits")) + + // Make sure sequence increments. + seq, err := tx.Bucket([]byte("widgets")).NextSequence() + ok(t, err) + equals(t, seq, uint64(1)) + seq, err = tx.Bucket([]byte("widgets")).NextSequence() + ok(t, err) + equals(t, seq, uint64(2)) + + // Buckets should be separate. + seq, err = tx.Bucket([]byte("woojits")).NextSequence() + ok(t, err) + equals(t, seq, uint64(1)) + return nil + }) +} + +// Ensure that a bucket will persist an autoincrementing sequence even if its +// the only thing updated on the bucket. +// https://github.com/boltdb/bolt/issues/296 +func TestBucket_NextSequence_Persist(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, _ = tx.CreateBucket([]byte("widgets")) + return nil + }) + + db.Update(func(tx *bolt.Tx) error { + _, _ = tx.Bucket([]byte("widgets")).NextSequence() + return nil + }) + + db.Update(func(tx *bolt.Tx) error { + seq, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } + return nil + }) +} + +// Ensure that retrieving the next sequence on a read-only bucket returns an error. 
+func TestBucket_NextSequence_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + i, err := b.NextSequence() + equals(t, i, uint64(0)) + equals(t, err, bolt.ErrTxNotWritable) + return nil + }) +} + +// Ensure that retrieving the next sequence for a bucket on a closed database return an error. +func TestBucket_NextSequence_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + _, err := b.NextSequence() + equals(t, bolt.ErrTxClosed, err) +} + +// Ensure a user can loop over all key/value pairs in a bucket. +func TestBucket_ForEach(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) + + var index int + err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + switch index { + case 0: + equals(t, k, []byte("bar")) + equals(t, v, []byte("0002")) + case 1: + equals(t, k, []byte("baz")) + equals(t, v, []byte("0001")) + case 2: + equals(t, k, []byte("foo")) + equals(t, v, []byte("0000")) + } + index++ + return nil + }) + ok(t, err) + equals(t, index, 3) + return nil + }) +} + +// Ensure a database can stop iteration early. 
+func TestBucket_ForEach_ShortCircuit(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) + + var index int + err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + index++ + if bytes.Equal(k, []byte("baz")) { + return errors.New("marker") + } + return nil + }) + equals(t, errors.New("marker"), err) + equals(t, 2, index) + return nil + }) +} + +// Ensure that looping over a bucket on a closed database returns an error. +func TestBucket_ForEach_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + err := b.ForEach(func(k, v []byte) error { return nil }) + equals(t, bolt.ErrTxClosed, err) +} + +// Ensure that an error is returned when inserting with an empty key. +func TestBucket_Put_EmptyKey(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) + equals(t, err, bolt.ErrKeyRequired) + err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) + equals(t, err, bolt.ErrKeyRequired) + return nil + }) +} + +// Ensure that an error is returned when inserting with a key that's too large. +func TestBucket_Put_KeyTooLarge(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) + equals(t, err, bolt.ErrKeyTooLarge) + return nil + }) +} + +// Ensure that an error is returned when inserting a value that's too large. 
+func TestBucket_Put_ValueTooLarge(t *testing.T) { + if os.Getenv("DRONE") == "true" { + t.Skip("not enough RAM for test") + } + + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)) + equals(t, err, bolt.ErrValueTooLarge) + return nil + }) +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats(t *testing.T) { + db := NewTestDB() + defer db.Close() + + // Add bucket with fewer keys but one big value. + big_key := []byte("really-big-value") + for i := 0; i < 500; i++ { + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) + }) + } + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + return b.Put(big_key, []byte(strings.Repeat("*", 10000))) + }) + + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("woojits")) + stats := b.Stats() + equals(t, 1, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 7, stats.LeafPageN) + equals(t, 2, stats.LeafOverflowN) + equals(t, 501, stats.KeyN) + equals(t, 2, stats.Depth) + + branchInuse := 16 // branch page header + branchInuse += 7 * 16 // branch elements + branchInuse += 7 * 3 // branch keys (6 3-byte keys) + equals(t, branchInuse, stats.BranchInuse) + + leafInuse := 7 * 16 // leaf page header + leafInuse += 501 * 16 // leaf elements + leafInuse += 500*3 + len(big_key) // leaf keys + leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values + equals(t, leafInuse, stats.LeafInuse) + + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 4096, stats.BranchAlloc) + equals(t, 36864, stats.LeafAlloc) + } + + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) + return nil + }) +} + +// Ensure a bucket with 
random insertion utilizes fill percentage correctly. +func TestBucket_Stats_RandomFill(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } else if os.Getpagesize() != 4096 { + t.Skip("invalid page size for test") + } + + db := NewTestDB() + defer db.Close() + + // Add a set of values in random order. It will be the same random + // order so we can maintain consistency between test runs. + var count int + r := rand.New(rand.NewSource(42)) + for _, i := range r.Perm(1000) { + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + b.FillPercent = 0.9 + for _, j := range r.Perm(100) { + index := (j * 10000) + i + b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) + count++ + } + return nil + }) + } + db.MustCheck() + + db.View(func(tx *bolt.Tx) error { + s := tx.Bucket([]byte("woojits")).Stats() + equals(t, 100000, s.KeyN) + + equals(t, 98, s.BranchPageN) + equals(t, 0, s.BranchOverflowN) + equals(t, 130984, s.BranchInuse) + equals(t, 401408, s.BranchAlloc) + + equals(t, 3412, s.LeafPageN) + equals(t, 0, s.LeafOverflowN) + equals(t, 4742482, s.LeafInuse) + equals(t, 13975552, s.LeafAlloc) + return nil + }) +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats_Small(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. 
+ b, err := tx.CreateBucket([]byte("whozawhats")) + ok(t, err) + b.Put([]byte("foo"), []byte("bar")) + + return nil + }) + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 1, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) + } + equals(t, 1, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16+16+6, stats.InlineBucketInuse) + return nil + }) +} + +func TestBucket_Stats_EmptyBucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. + _, err := tx.CreateBucket([]byte("whozawhats")) + ok(t, err) + return nil + }) + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 0, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) + } + equals(t, 1, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16, stats.InlineBucketInuse) + return nil + }) +} + +// Ensure a bucket can calculate stats. 
+func TestBucket_Stats_Nested(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + ok(t, err) + for i := 0; i < 100; i++ { + b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) + } + bar, err := b.CreateBucket([]byte("bar")) + ok(t, err) + for i := 0; i < 10; i++ { + bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + } + baz, err := bar.CreateBucket([]byte("baz")) + ok(t, err) + for i := 0; i < 10; i++ { + baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + } + return nil + }) + + db.MustCheck() + + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("foo")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 2, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 122, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 0, stats.BranchInuse) + + foo := 16 // foo (pghdr) + foo += 101 * 16 // foo leaf elements + foo += 100*2 + 100*2 // foo leaf key/values + foo += 3 + 16 // foo -> bar key/value + + bar := 16 // bar (pghdr) + bar += 11 * 16 // bar leaf elements + bar += 10 + 10 // bar leaf key/values + bar += 3 + 16 // bar -> baz key/value + + baz := 16 // baz (inline) (pghdr) + baz += 10 * 16 // baz leaf elements + baz += 10 + 10 // baz leaf key/values + + equals(t, foo+bar+baz, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 8192, stats.LeafAlloc) + } + equals(t, 3, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, baz, stats.InlineBucketInuse) + return nil + }) +} + +// Ensure a large bucket can calculate stats. +func TestBucket_Stats_Large(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := NewTestDB() + defer db.Close() + + var index int + for i := 0; i < 100; i++ { + db.Update(func(tx *bolt.Tx) error { + // Add bucket with lots of keys. 
+ b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + for i := 0; i < 1000; i++ { + b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) + index++ + } + return nil + }) + } + db.MustCheck() + + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + stats := b.Stats() + equals(t, 13, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 1196, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 100000, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 25257, stats.BranchInuse) + equals(t, 2596916, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 53248, stats.BranchAlloc) + equals(t, 4898816, stats.LeafAlloc) + } + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) + return nil + }) +} + +// Ensure that a bucket can write random keys and values across multiple transactions. +func TestBucket_Put_Single(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + index := 0 + f := func(items testdata) bool { + db := NewTestDB() + defer db.Close() + + m := make(map[string][]byte) + + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + for _, item := range items { + db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { + panic("put error: " + err.Error()) + } + m[string(item.Key)] = item.Value + return nil + }) + + // Verify all key/values so far. 
+ db.View(func(tx *bolt.Tx) error { + i := 0 + for k, v := range m { + value := tx.Bucket([]byte("widgets")).Get([]byte(k)) + if !bytes.Equal(value, v) { + t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) + db.CopyTempFile() + t.FailNow() + } + i++ + } + return nil + }) + } + + index++ + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can insert multiple key/value pairs at once. +func TestBucket_Put_Multiple(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + f := func(items testdata) bool { + db := NewTestDB() + defer db.Close() + // Bulk insert all values. + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + return nil + }) + ok(t, err) + + // Verify all items exist. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + value := b.Get(item.Key) + if !bytes.Equal(item.Value, value) { + db.CopyTempFile() + t.Fatalf("exp=%x; got=%x", item.Value, value) + } + } + return nil + }) + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. +func TestBucket_Delete_Quick(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + f := func(items testdata) bool { + db := NewTestDB() + defer db.Close() + // Bulk insert all values. 
+ db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + return nil + }) + ok(t, err) + + // Remove items one at a time and check consistency. + for _, item := range items { + err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete(item.Key) + }) + ok(t, err) + } + + // Anything before our deletion index should be nil. + db.View(func(tx *bolt.Tx) error { + tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) + return nil + }) + return nil + }) + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +func ExampleBucket_Put() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Start a write transaction. + db.Update(func(tx *bolt.Tx) error { + // Create a bucket. + tx.CreateBucket([]byte("widgets")) + + // Set the value "bar" for the key "foo". + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + return nil + }) + + // Read value back in a different read-only transaction. + db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }) + + // Output: + // The value of 'foo' is: bar +} + +func ExampleBucket_Delete() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Start a write transaction. + db.Update(func(tx *bolt.Tx) error { + // Create a bucket. + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + + // Set the value "bar" for the key "foo". + b.Put([]byte("foo"), []byte("bar")) + + // Retrieve the key back from the database and verify it. 
+ value := b.Get([]byte("foo")) + fmt.Printf("The value of 'foo' was: %s\n", value) + return nil + }) + + // Delete the key in a different write transaction. + db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) + }) + + // Retrieve the key again. + db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if value == nil { + fmt.Printf("The value of 'foo' is now: nil\n") + } + return nil + }) + + // Output: + // The value of 'foo' was: bar + // The value of 'foo' is now: nil +} + +func ExampleBucket_ForEach() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Insert data into a bucket. + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("animals")) + b := tx.Bucket([]byte("animals")) + b.Put([]byte("dog"), []byte("fun")) + b.Put([]byte("cat"), []byte("lame")) + b.Put([]byte("liger"), []byte("awesome")) + + // Iterate over items in sorted key order. + b.ForEach(func(k, v []byte) error { + fmt.Printf("A %s is %s.\n", k, v) + return nil + }) + return nil + }) + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go new file mode 100644 index 000000000..c41ebe404 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go @@ -0,0 +1,1529 @@ +package main + +import ( + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + "unsafe" + + "github.com/boltdb/bolt" +) + +var ( + // ErrUsage is returned when a usage message was printed and the process + // should simply exit with an error. + ErrUsage = errors.New("usage") + + // ErrUnknownCommand is returned when a CLI command is not specified. 
+ ErrUnknownCommand = errors.New("unknown command") + + // ErrPathRequired is returned when the path to a Bolt database is not specified. + ErrPathRequired = errors.New("path required") + + // ErrFileNotFound is returned when a Bolt database does not exist. + ErrFileNotFound = errors.New("file not found") + + // ErrInvalidValue is returned when a benchmark reads an unexpected value. + ErrInvalidValue = errors.New("invalid value") + + // ErrCorrupt is returned when a checking a data file finds errors. + ErrCorrupt = errors.New("invalid value") + + // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly + // divided by the iteration count. + ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") + + // ErrPageIDRequired is returned when a required page id is not specified. + ErrPageIDRequired = errors.New("page id required") + + // ErrPageNotFound is returned when specifying a page above the high water mark. + ErrPageNotFound = errors.New("page not found") + + // ErrPageFreed is returned when reading a page that has already been freed. + ErrPageFreed = errors.New("page freed") +) + +// PageHeaderSize represents the size of the bolt.page header. +const PageHeaderSize = 16 + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err == ErrUsage { + os.Exit(2) + } else if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} + +// Main represents the main program execution. +type Main struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main connect to the standard input/output. +func NewMain() *Main { + return &Main{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run executes the program. +func (m *Main) Run(args ...string) error { + // Require a command at the beginning. + if len(args) == 0 || strings.HasPrefix(args[0], "-") { + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + } + + // Execute command. 
+ switch args[0] { + case "help": + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + case "bench": + return newBenchCommand(m).Run(args[1:]...) + case "check": + return newCheckCommand(m).Run(args[1:]...) + case "dump": + return newDumpCommand(m).Run(args[1:]...) + case "info": + return newInfoCommand(m).Run(args[1:]...) + case "page": + return newPageCommand(m).Run(args[1:]...) + case "pages": + return newPagesCommand(m).Run(args[1:]...) + case "stats": + return newStatsCommand(m).Run(args[1:]...) + default: + return ErrUnknownCommand + } +} + +// Usage returns the help message. +func (m *Main) Usage() string { + return strings.TrimLeft(` +Bolt is a tool for inspecting bolt databases. + +Usage: + + bolt command [arguments] + +The commands are: + + bench run synthetic benchmark against bolt + check verifies integrity of bolt database + info print basic info + help print this screen + pages print list of pages with their types + stats iterate over all pages and generate usage stats + +Use "bolt [command] -h" for more information about a command. +`, "\n") +} + +// CheckCommand represents the "check" command execution. +type CheckCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewCheckCommand returns a CheckCommand. +func newCheckCommand(m *Main) *CheckCommand { + return &CheckCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CheckCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. 
+ db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Perform consistency check. + return db.View(func(tx *bolt.Tx) error { + var count int + ch := tx.Check() + loop: + for { + select { + case err, ok := <-ch: + if !ok { + break loop + } + fmt.Fprintln(cmd.Stdout, err) + count++ + } + } + + // Print summary of errors. + if count > 0 { + fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) + return ErrCorrupt + } + + // Notify user that database is valid. + fmt.Fprintln(cmd.Stdout, "OK") + return nil + }) +} + +// Usage returns the help message. +func (cmd *CheckCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt check PATH + +Check opens a database at PATH and runs an exhaustive check to verify that +all pages are accessible or are marked as freed. It also verifies that no +pages are double referenced. + +Verification errors will stream out as they are found and the process will +return after all pages have been checked. +`, "\n") +} + +// InfoCommand represents the "info" command execution. +type InfoCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewInfoCommand returns a InfoCommand. +func newInfoCommand(m *Main) *InfoCommand { + return &InfoCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *InfoCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open the database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print basic database info. 
+ info := db.Info() + fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) + + return nil +} + +// Usage returns the help message. +func (cmd *InfoCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt info PATH + +Info prints basic information about the Bolt database at PATH. +`, "\n") +} + +// DumpCommand represents the "dump" command execution. +type DumpCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newDumpCommand returns a DumpCommand. +func newDumpCommand(m *Main) *DumpCommand { + return &DumpCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *DumpCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database to retrieve page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return err + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. + if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Print page to stdout. + if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { + return err + } + } + + return nil +} + +// PrintPage prints a given page as hexidecimal. 
+func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { + const bytesPerLineN = 16 + + // Read page into buffer. + buf := make([]byte, pageSize) + addr := pageID * pageSize + if n, err := r.ReadAt(buf, int64(addr)); err != nil { + return err + } else if n != pageSize { + return io.ErrUnexpectedEOF + } + + // Write out to writer in 16-byte lines. + var prev []byte + var skipped bool + for offset := 0; offset < pageSize; offset += bytesPerLineN { + // Retrieve current 16-byte line. + line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // If it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // Print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *DumpCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt dump -page PAGEID PATH + +Dump prints a hexidecimal dump of a single page. +`, "\n") +} + +// PageCommand represents the "page" command execution. +type PageCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newPageCommand returns a PageCommand. +func newPageCommand(m *Main) *PageCommand { + return &PageCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PageCommand) Run(args ...string) error { + // Parse flags. 
+ fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. + if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Retrieve page info and page size. + p, buf, err := ReadPage(path, pageID) + if err != nil { + return err + } + + // Print basic page info. + fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) + fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) + fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) + + // Print type-specific data. + switch p.Type() { + case "meta": + err = cmd.PrintMeta(cmd.Stdout, buf) + case "leaf": + err = cmd.PrintLeaf(cmd.Stdout, buf) + case "branch": + err = cmd.PrintBranch(cmd.Stdout, buf) + case "freelist": + err = cmd.PrintFreelist(cmd.Stdout, buf) + } + if err != nil { + return err + } + } + + return nil +} + +// PrintMeta prints the data from the meta page. 
+// PrintMeta prints the fields of the meta page stored at buf.
+func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
+	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	// BUG FIX: the following three format strings had no verbs, so the page
+	// ids passed as arguments were never printed (vet: Fprintf arg not used).
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintLeaf prints the data for a leaf page.
+func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.leafPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		// Format value as string.
+		// BUG FIX: the printable/hex branches assigned to k instead of v, so
+		// the key was overwritten with the value and v printed empty; the
+		// bucket branch also used an empty format string with two arguments.
+		var v string
+		if (e.flags & uint32(bucketLeafFlag)) != 0 {
+			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
+			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+		} else if isPrintable(string(e.value())) {
+			v = fmt.Sprintf("%q", string(e.value()))
+		} else {
+			v = fmt.Sprintf("%x", string(e.value()))
+		}
+
+		fmt.Fprintf(w, "%s: %s\n", k, v)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintBranch prints the data for a branch page.
+func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.branchPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		// BUG FIX: format string had no verb for e.pgid, so the child page id
+		// was never printed.
+		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintFreelist prints the data for a freelist page.
+func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each page in the freelist.
+	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
+	for i := uint16(0); i < p.count; i++ {
+		fmt.Fprintf(w, "%d\n", ids[i])
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintPage prints a given page as hexidecimal.
+func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+		line := buf[offset : offset+bytesPerLineN]
+		isLastLine := (offset == (pageSize - bytesPerLineN))
+
+		// If it's the same as the previous line then print a skip.
+		if bytes.Equal(line, prev) && !isLastLine {
+			if !skipped {
+				fmt.Fprintf(w, "%07x *\n", addr+offset)
+				skipped = true
+			}
+		} else {
+			// Print line as hexadecimal in 2-byte groups.
+			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
+				line[0:2], line[2:4], line[4:6], line[6:8],
+				line[8:10], line[10:12], line[12:14], line[14:16],
+			)
+
+			skipped = false
+		}
+
+		// Save the previous line.
+		prev = line
+	}
+	fmt.Fprint(w, "\n")
+
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *PageCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt page -page PATH pageid [pageid...] + +Page prints one or more pages in human readable format. +`, "\n") +} + +// PagesCommand represents the "pages" command execution. +type PagesCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPagesCommand returns a PagesCommand. +func newPagesCommand(m *Main) *PagesCommand { + return &PagesCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PagesCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + // Write header. + fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") + fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") + + return db.Update(func(tx *bolt.Tx) error { + var id int + for { + p, err := tx.Page(id) + if err != nil { + return &PageError{ID: id, Err: err} + } else if p == nil { + break + } + + // Only display count and overflow if this is a non-free page. + var count, overflow string + if p.Type != "free" { + count = strconv.Itoa(p.Count) + if p.OverflowCount > 0 { + overflow = strconv.Itoa(p.OverflowCount) + } + } + + // Print table row. + fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) + + // Move to the next non-overflow page. + id += 1 + if p.Type != "free" { + id += p.OverflowCount + } + } + return nil + }) +} + +// Usage returns the help message. 
+func (cmd *PagesCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt pages PATH + +Pages prints a table of pages with their type (meta, leaf, branch, freelist). +Leaf and branch pages will show a key count in the "items" column while the +freelist will show the number of free pages in the "items" column. + +The "overflow" column shows the number of blocks that the page spills over +into. Normally there is no overflow but large keys and values can cause +a single page to take up multiple blocks. +`, "\n") +} + +// StatsCommand represents the "stats" command execution. +type StatsCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewStatsCommand returns a StatsCommand. +func newStatsCommand(m *Main) *StatsCommand { + return &StatsCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *StatsCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path, prefix := fs.Arg(0), fs.Arg(1) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. 
+	db, err := bolt.Open(path, 0666, nil)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	return db.View(func(tx *bolt.Tx) error {
+		var s bolt.BucketStats
+		var count int
+		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
+			if bytes.HasPrefix(name, []byte(prefix)) {
+				s.Add(b.Stats())
+				count += 1
+			}
+			return nil
+		}); err != nil {
+			return err
+		}
+
+		fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count)
+
+		fmt.Fprintln(cmd.Stdout, "Page count statistics")
+		fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN)
+
+		fmt.Fprintln(cmd.Stdout, "Tree statistics")
+		fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN)
+		fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth)
+
+		fmt.Fprintln(cmd.Stdout, "Page size utilization")
+		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc)
+		var percentage int
+		if s.BranchAlloc != 0 {
+			percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage)
+		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc)
+		percentage = 0
+		if s.LeafAlloc != 0 {
+			percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage)
+
+		fmt.Fprintln(cmd.Stdout, "Bucket statistics")
+		fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
+		// BUG FIX: guard against division by zero when no buckets match the
+		// prefix, consistent with the BranchAlloc/LeafAlloc guards above.
+		percentage = 0
+		if s.BucketN != 0 {
+			percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
+		percentage = 0
+		if s.LeafInuse != 0 {
+			percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))
+		}
+		fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage)
+
+		return nil
+	})
+}
+
+// Usage returns the help message.
+func (cmd *StatsCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt stats PATH
+
+Stats performs an extensive search of the database to track every page
+reference. It starts at the current meta page and recursively iterates
+through every accessible bucket.
+
+The following errors can be reported:
+
+    already freed
+        The page is referenced more than once in the freelist.
+
+    unreachable unfreed
+        The page is not referenced by a bucket or in the freelist.
+
+    reachable freed
+        The page is referenced by a bucket but is also in the freelist.
+
+    out of bounds
+        A page is referenced that is above the high water mark.
+
+    multiple references
+        A page is referenced by more than one other page.
+
+    invalid type
+        The page type is not "meta", "leaf", "branch", or "freelist".
+
+No errors should occur in your database. However, if for some reason you
+experience corruption, please submit a ticket to the Bolt project page:
+
+  https://github.com/boltdb/bolt/issues
+`, "\n")
+}
+
+var benchBucketName = []byte("bench")
+
+// BenchCommand represents the "bench" command execution.
+type BenchCommand struct {
+	Stdin  io.Reader
+	Stdout io.Writer
+	Stderr io.Writer
+}
+
+// newBenchCommand returns a BenchCommand using the streams of the given Main.
+func newBenchCommand(m *Main) *BenchCommand {
+	return &BenchCommand{
+		Stdin:  m.Stdin,
+		Stdout: m.Stdout,
+		Stderr: m.Stderr,
+	}
+}
+
+// Run executes the "bench" command.
+func (cmd *BenchCommand) Run(args ...string) error {
+	// Parse CLI arguments.
+	options, err := cmd.ParseFlags(args)
+	if err != nil {
+		return err
+	}
+
+	// Remove path if "-work" is not set. Otherwise keep path.
+ if options.Work { + fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) + } else { + defer os.Remove(options.Path) + } + + // Create database. + db, err := bolt.Open(options.Path, 0666, nil) + if err != nil { + return err + } + db.NoSync = options.NoSync + defer db.Close() + + // Write to the database. + var results BenchResults + if err := cmd.runWrites(db, options, &results); err != nil { + return fmt.Errorf("write: %v", err) + } + + // Read from the database. + if err := cmd.runReads(db, options, &results); err != nil { + return fmt.Errorf("bench: read: %s", err) + } + + // Print results. + fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) + fmt.Fprintln(os.Stderr, "") + return nil +} + +// ParseFlags parses the command line flags. +func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { + var options BenchOptions + + // Parse flagset. 
+ fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") + fs.StringVar(&options.WriteMode, "write-mode", "seq", "") + fs.StringVar(&options.ReadMode, "read-mode", "seq", "") + fs.IntVar(&options.Iterations, "count", 1000, "") + fs.IntVar(&options.BatchSize, "batch-size", 0, "") + fs.IntVar(&options.KeySize, "key-size", 8, "") + fs.IntVar(&options.ValueSize, "value-size", 32, "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.StringVar(&options.BlockProfile, "blockprofile", "", "") + fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") + fs.BoolVar(&options.NoSync, "no-sync", false, "") + fs.BoolVar(&options.Work, "work", false, "") + fs.StringVar(&options.Path, "path", "", "") + fs.SetOutput(cmd.Stderr) + if err := fs.Parse(args); err != nil { + return nil, err + } + + // Set batch size to iteration size if not set. + // Require that batch size can be evenly divided by the iteration count. + if options.BatchSize == 0 { + options.BatchSize = options.Iterations + } else if options.Iterations%options.BatchSize != 0 { + return nil, ErrNonDivisibleBatchSize + } + + // Generate temp path if one is not passed in. + if options.Path == "" { + f, err := ioutil.TempFile("", "bolt-bench-") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + f.Close() + os.Remove(f.Name()) + options.Path = f.Name() + } + + return &options, nil +} + +// Writes to the database. +func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for writes. 
+ if options.ProfileMode == "rw" || options.ProfileMode == "w" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.WriteMode { + case "seq": + err = cmd.runWritesSequential(db, options, results) + case "rnd": + err = cmd.runWritesRandom(db, options, results) + case "seq-nest": + err = cmd.runWritesSequentialNested(db, options, results) + case "rnd-nest": + err = cmd.runWritesRandomNested(db, options, results) + default: + return fmt.Errorf("invalid write mode: %s", options.WriteMode) + } + + // Save time to write. + results.WriteDuration = time.Since(t) + + // Stop profiling for writes only. + if options.ProfileMode == "w" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, 
_ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + key := make([]byte, options.KeySize) + value := make([]byte, options.ValueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert key/value. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + top, err := tx.CreateBucketIfNotExists(benchBucketName) + if err != nil { + return err + } + top.FillPercent = options.FillPercent + + // Create bucket key. + name := make([]byte, options.KeySize) + binary.BigEndian.PutUint32(name, keySource()) + + // Create bucket. + b, err := top.CreateBucketIfNotExists(name) + if err != nil { + return err + } + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + var key = make([]byte, options.KeySize) + var value = make([]byte, options.ValueSize) + + // Generate key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert value into subbucket. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +// Reads from the database. +func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for reads. 
+ if options.ProfileMode == "r" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.ReadMode { + case "seq": + switch options.WriteMode { + case "seq-nest", "rnd-nest": + err = cmd.runReadsSequentialNested(db, options, results) + default: + err = cmd.runReadsSequential(db, options, results) + } + default: + return fmt.Errorf("invalid read mode: %s", options.ReadMode) + } + + // Save read time. + results.ReadDuration = time.Since(t) + + // Stop profiling for reads. + if options.ProfileMode == "rw" || options.ProfileMode == "r" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return errors.New("invalid value") + } + count++ + } + + if options.WriteMode == "seq" && count != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. 
+ if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + var top = tx.Bucket(benchBucketName) + if err := top.ForEach(func(name, _ []byte) error { + c := top.Bucket(name).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return ErrInvalidValue + } + count++ + } + return nil + }); err != nil { + return err + } + + if options.WriteMode == "seq-nest" && count != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +// File handlers for the various profiles. +var cpuprofile, memprofile, blockprofile *os.File + +// Starts all profiles set on the options. +func (cmd *BenchCommand) startProfiling(options *BenchOptions) { + var err error + + // Start CPU profiling. + if options.CPUProfile != "" { + cpuprofile, err = os.Create(options.CPUProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) + os.Exit(1) + } + pprof.StartCPUProfile(cpuprofile) + } + + // Start memory profiling. + if options.MemProfile != "" { + memprofile, err = os.Create(options.MemProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) + os.Exit(1) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. 
+ if options.BlockProfile != "" { + blockprofile, err = os.Create(options.BlockProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) + os.Exit(1) + } + runtime.SetBlockProfileRate(1) + } +} + +// Stops all profiles. +func (cmd *BenchCommand) stopProfiling() { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + pprof.Lookup("heap").WriteTo(memprofile, 0) + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + pprof.Lookup("block").WriteTo(blockprofile, 0) + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// BenchOptions represents the set of options that can be passed to "bolt bench". +type BenchOptions struct { + ProfileMode string + WriteMode string + ReadMode string + Iterations int + BatchSize int + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string + StatsInterval time.Duration + FillPercent float64 + NoSync bool + Work bool + Path string +} + +// BenchResults represents the performance results of the benchmark. +type BenchResults struct { + WriteOps int + WriteDuration time.Duration + ReadOps int + ReadDuration time.Duration +} + +// Returns the duration for a single write operation. +func (r *BenchResults) WriteOpDuration() time.Duration { + if r.WriteOps == 0 { + return 0 + } + return r.WriteDuration / time.Duration(r.WriteOps) +} + +// Returns average number of write operations that can be performed per second. +func (r *BenchResults) WriteOpsPerSecond() int { + var op = r.WriteOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +// Returns the duration for a single read operation. 
+func (r *BenchResults) ReadOpDuration() time.Duration { + if r.ReadOps == 0 { + return 0 + } + return r.ReadDuration / time.Duration(r.ReadOps) +} + +// Returns average number of read operations that can be performed per second. +func (r *BenchResults) ReadOpsPerSecond() int { + var op = r.ReadOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +type PageError struct { + ID int + Err error +} + +func (e *PageError) Error() string { + return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) +} + +// isPrintable returns true if the string is valid unicode and contains only printable runes. +func isPrintable(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, ch := range s { + if !unicode.IsPrint(ch) { + return false + } + } + return true +} + +// ReadPage reads page info & full page data from a path. +// This is not transactionally safe. +func ReadPage(path string, pageID int) (*page, []byte, error) { + // Find page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return nil, nil, fmt.Errorf("read page size: %s", err) + } + + // Open database file. + f, err := os.Open(path) + if err != nil { + return nil, nil, err + } + defer f.Close() + + // Read one block into buffer. + buf := make([]byte, pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + + // Determine total number of blocks. + p := (*page)(unsafe.Pointer(&buf[0])) + overflowN := p.overflow + + // Re-read entire page (with overflow) into buffer. + buf = make([]byte, (int(overflowN)+1)*pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + p = (*page)(unsafe.Pointer(&buf[0])) + + return p, buf, nil +} + +// ReadPageSize reads page size a path. +// This is not transactionally safe. 
+func ReadPageSize(path string) (int, error) { + // Open database file. + f, err := os.Open(path) + if err != nil { + return 0, err + } + defer f.Close() + + // Read 4KB chunk. + buf := make([]byte, 4096) + if _, err := io.ReadFull(f, buf); err != nil { + return 0, err + } + + // Read page size from metadata. + m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) + return int(m.pageSize), nil +} + +// atois parses a slice of strings into integers. +func atois(strs []string) ([]int, error) { + var a []int + for _, str := range strs { + i, err := strconv.Atoi(str) + if err != nil { + return nil, err + } + a = append(a, i) + } + return a, nil +} + +// DO NOT EDIT. Copied from the "bolt" package. +const maxAllocSize = 0xFFFFFFF + +// DO NOT EDIT. Copied from the "bolt" package. +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +// DO NOT EDIT. Copied from the "bolt" package. +const bucketLeafFlag = 0x01 + +// DO NOT EDIT. Copied from the "bolt" package. +type pgid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type txid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type bucket struct { + root pgid + sequence uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) Type() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// DO NOT EDIT. 
Copied from the "bolt" package. +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go new file mode 100644 index 000000000..b9e8c671f --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go @@ -0,0 +1,145 @@ +package main_test + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/boltdb/bolt" + "github.com/boltdb/bolt/cmd/bolt" +) + +// Ensure the "info" command can print information about a database. +func TestInfoCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + db.DB.Close() + defer db.Close() + + // Run the info command. 
+ m := NewMain() + if err := m.Run("info", db.Path); err != nil { + t.Fatal(err) + } +} + +// Ensure the "stats" command can execute correctly. +func TestStatsCommand_Run(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create "foo" bucket. + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + return err + } + for i := 0; i < 10; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "bar" bucket. + b, err = tx.CreateBucket([]byte("bar")) + if err != nil { + return err + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "baz" bucket. + b, err = tx.CreateBucket([]byte("baz")) + if err != nil { + return err + } + if err := b.Put([]byte("key"), []byte("value")); err != nil { + return err + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + // Generate expected result. + exp := "Aggregate statistics for 3 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 1\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 111\n" + + "\tNumber of levels in B+tree: 1\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 4096\n" + + "\tBytes actually used for leaf data: 1996 (48%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 3\n" + + "\tTotal number on inlined buckets: 2 (66%)\n" + + "\tBytes used for inlined buckets: 236 (11%)\n" + + // Run the command. 
+ m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Main represents a test wrapper for main.Main that records output. +type Main struct { + *main.Main + Stdin bytes.Buffer + Stdout bytes.Buffer + Stderr bytes.Buffer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + m := &Main{Main: main.NewMain()} + m.Main.Stdin = &m.Stdin + m.Main.Stdout = &m.Stdout + m.Main.Stderr = &m.Stderr + return m +} + +// MustOpen creates a Bolt database in a temporary location. +func MustOpen(mode os.FileMode, options *bolt.Options) *DB { + // Create temporary path. + f, _ := ioutil.TempFile("", "bolt-") + f.Close() + os.Remove(f.Name()) + + db, err := bolt.Open(f.Name(), mode, options) + if err != nil { + panic(err.Error()) + } + return &DB{DB: db, Path: f.Name()} +} + +// DB is a test wrapper for bolt.DB. +type DB struct { + *bolt.DB + Path string +} + +// Close closes and removes the database. +func (db *DB) Close() error { + defer os.Remove(db.Path) + return db.DB.Close() +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go new file mode 100644 index 000000000..006c54889 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,384 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. 
+type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. 
+// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. 
+ if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. 
+// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + return c.keyValue() +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. 
+ c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. 
+func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go new file mode 100644 index 000000000..b12e1f915 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go @@ -0,0 +1,511 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "sort" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a cursor can return a reference to the bucket that created it. +func TestCursor_Bucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + c := b.Cursor() + equals(t, b, c.Bucket()) + return nil + }) +} + +// Ensure that a Tx cursor can seek to the appropriate keys. 
+func TestCursor_Seek(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, b.Put([]byte("foo"), []byte("0001"))) + ok(t, b.Put([]byte("bar"), []byte("0002"))) + ok(t, b.Put([]byte("baz"), []byte("0003"))) + _, err = b.CreateBucket([]byte("bkt")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + + // Exact match should go to the key. + k, v := c.Seek([]byte("bar")) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) + + // Inexact match should go to the next key. + k, v = c.Seek([]byte("bas")) + equals(t, []byte("baz"), k) + equals(t, []byte("0003"), v) + + // Low key should go to the first key. + k, v = c.Seek([]byte("")) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) + + // High key should return no key. + k, v = c.Seek([]byte("zzz")) + assert(t, k == nil, "") + assert(t, v == nil, "") + + // Buckets should return their key but no value. + k, v = c.Seek([]byte("bkt")) + equals(t, []byte("bkt"), k) + assert(t, v == nil, "") + + return nil + }) +} + +func TestCursor_Delete(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var count = 1000 + + // Insert every other key between 0 and $count. 
+ db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + for i := 0; i < count; i += 1 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(i)) + b.Put(k, make([]byte, 100)) + } + b.CreateBucket([]byte("sub")) + return nil + }) + + db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + bound := make([]byte, 8) + binary.BigEndian.PutUint64(bound, uint64(count/2)) + for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { + if err := c.Delete(); err != nil { + return err + } + } + c.Seek([]byte("sub")) + err := c.Delete() + equals(t, err, bolt.ErrIncompatibleValue) + return nil + }) + + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + equals(t, b.Stats().KeyN, count/2+1) + return nil + }) +} + +// Ensure that a Tx cursor can seek to the appropriate keys when there are a +// large number of keys. This test also checks that seek will always move +// forward to the next key. +// +// Related: https://github.com/boltdb/bolt/pull/187 +func TestCursor_Seek_Large(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var count = 10000 + + // Insert every other key between 0 and $count. + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + for i := 0; i < count; i += 100 { + for j := i; j < i+100; j += 2 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(j)) + b.Put(k, make([]byte, 100)) + } + } + return nil + }) + + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + for i := 0; i < count; i++ { + seek := make([]byte, 8) + binary.BigEndian.PutUint64(seek, uint64(i)) + + k, _ := c.Seek(seek) + + // The last seek is beyond the end of the the range so + // it should return nil. + if i == count-1 { + assert(t, k == nil, "") + continue + } + + // Otherwise we should seek to the exact key or the next key. 
+ num := binary.BigEndian.Uint64(k) + if i%2 == 0 { + equals(t, uint64(i), num) + } else { + equals(t, uint64(i+1), num) + } + } + + return nil + }) +} + +// Ensure that a cursor can iterate over an empty bucket without error. +func TestCursor_EmptyBucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.First() + assert(t, k == nil, "") + assert(t, v == nil, "") + return nil + }) +} + +// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. +func TestCursor_EmptyBucketReverse(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.Last() + assert(t, k == nil, "") + assert(t, v == nil, "") + return nil + }) +} + +// Ensure that a Tx cursor can iterate over a single root with a couple elements. 
+func TestCursor_Iterate_Leaf(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + return nil + }) + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.First() + equals(t, string(k), "bar") + equals(t, v, []byte{1}) + + k, v = c.Next() + equals(t, string(k), "baz") + equals(t, v, []byte{}) + + k, v = c.Next() + equals(t, string(k), "foo") + equals(t, v, []byte{0}) + + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") + + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") + + tx.Rollback() +} + +// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. +func TestCursor_LeafRootReverse(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + return nil + }) + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.Last() + equals(t, string(k), "foo") + equals(t, v, []byte{0}) + + k, v = c.Prev() + equals(t, string(k), "baz") + equals(t, v, []byte{}) + + k, v = c.Prev() + equals(t, string(k), "bar") + equals(t, v, []byte{1}) + + k, v = c.Prev() + assert(t, k == nil, "") + assert(t, v == nil, "") + + k, v = c.Prev() + assert(t, k == nil, "") + assert(t, v == nil, "") + + tx.Rollback() +} + +// Ensure that a Tx cursor can restart from the beginning. 
+func TestCursor_Restart(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) + return nil + }) + + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, _ := c.First() + equals(t, string(k), "bar") + + k, _ = c.Next() + equals(t, string(k), "foo") + + k, _ = c.First() + equals(t, string(k), "bar") + + k, _ = c.Next() + equals(t, string(k), "foo") + + tx.Rollback() +} + +// Ensure that a Tx can iterate over all elements in a bucket. +func TestCursor_QuickCheck(t *testing.T) { + f := func(items testdata) bool { + db := NewTestDB() + defer db.Close() + + // Bulk insert all values. + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + ok(t, tx.Commit()) + + // Sort test data. + sort.Sort(items) + + // Iterate over all items and check consistency. + var index = 0 + tx, _ = db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) + index++ + } + equals(t, len(items), index) + tx.Rollback() + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can iterate over all elements in a bucket in reverse. +func TestCursor_QuickCheck_Reverse(t *testing.T) { + f := func(items testdata) bool { + db := NewTestDB() + defer db.Close() + + // Bulk insert all values. + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + ok(t, tx.Commit()) + + // Sort test data. 
+ sort.Sort(revtestdata(items)) + + // Iterate over all items and check consistency. + var index = 0 + tx, _ = db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) + index++ + } + equals(t, len(items), index) + tx.Rollback() + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a Tx cursor can iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) + _, err = b.CreateBucket([]byte("bar")) + ok(t, err) + _, err = b.CreateBucket([]byte("baz")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + names = append(names, string(k)) + assert(t, v == nil, "") + } + equals(t, names, []string{"bar", "baz", "foo"}) + return nil + }) +} + +// Ensure that a Tx cursor can reverse iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) + _, err = b.CreateBucket([]byte("bar")) + ok(t, err) + _, err = b.CreateBucket([]byte("baz")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil; k, v = c.Prev() { + names = append(names, string(k)) + assert(t, v == nil, "") + } + equals(t, names, []string{"foo", "baz", "bar"}) + return nil + }) +} + +func ExampleCursor() { + // Open the database. 
+ db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Start a read-write transaction. + db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + tx.CreateBucket([]byte("animals")) + + // Insert data into a bucket. + b := tx.Bucket([]byte("animals")) + b.Put([]byte("dog"), []byte("fun")) + b.Put([]byte("cat"), []byte("lame")) + b.Put([]byte("liger"), []byte("awesome")) + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in sorted key order. This starts from the + // first key/value pair and updates the k/v variables to the + // next key/value on each iteration. + // + // The loop finishes at the end of the cursor when a nil key is returned. + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }) + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} + +func ExampleCursor_reverse() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Start a read-write transaction. + db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + tx.CreateBucket([]byte("animals")) + + // Insert data into a bucket. + b := tx.Bucket([]byte("animals")) + b.Put([]byte("dog"), []byte("fun")) + b.Put([]byte("cat"), []byte("lame")) + b.Put([]byte("liger"), []byte("awesome")) + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in reverse sorted key order. This starts + // from the last key/value pair and updates the k/v variables to + // the previous key/value on each iteration. + // + // The loop finishes at the beginning of the cursor when a nil key + // is returned. + for k, v := c.Last(); k != nil; k, v = c.Prev() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }) + + // Output: + // A liger is awesome. + // A dog is fun. + // A cat is lame. 
+} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go new file mode 100644 index 000000000..d39c4aa9c --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go @@ -0,0 +1,792 @@ +package bolt + +import ( + "fmt" + "hash/fnv" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronzied using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. 
+ // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + path string + file *os.File + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. 
+func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. 
+ if info, err := db.file.Stat(); err != nil { + return nil, fmt.Errorf("stat error: %s", err) + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + return nil, fmt.Errorf("meta0 error: %s", err) + } + db.pageSize = int(m.pageSize) + } + } + + // Memory map the data file. + if err := db.mmap(0); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. 
+ if err := db.meta0.validate(); err != nil { + return fmt.Errorf("meta0 error: %s", err) + } + if err := db.meta1.validate(); err != nil { + return fmt.Errorf("meta1 error: %s", err) + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 1MB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. +func (db *DB) mmapSize(size int) (int, error) { + // Double the size from 32KB until 1GB. + for i := uint(15); i <= 30; i++ { + if size <= 1< maxMapSize { + return 0, fmt.Errorf("mmap too large") + } + + // If larger than 1GB then grow by 1GB at a time. + sz := int64(size) + if remainder := sz % int64(maxMmapStep); remainder > 0 { + sz += int64(maxMmapStep) - remainder + } + + // Ensure that the mmap size is a multiple of the page size. + // This should always be true since we're incrementing in MBs. + pageSize := int64(db.pageSize) + if (sz % pageSize) != 0 { + sz = ((sz / pageSize) + 1) * pageSize + } + + // If we've exceeded the max size then only grow up to the max size. + if sz > maxMapSize { + sz = maxMapSize + } + + return int(sz), nil +} + +// init creates a new database file and initializes its meta pages. +func (db *DB) init() error { + // Set the page size to the OS page size. + db.pageSize = os.Getpagesize() + + // Create two meta pages on a buffer. + buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + } + + // Write an empty freelist at page 3. 
+ p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + db.opened = false + + db.freelist = nil + db.path = "" + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + _ = funlock(db.file) + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be depedent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. 
+// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. 
+ t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. 
+// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. 
+func (db *DB) meta() *meta { + if db.meta0.txid > db.meta1.txid { + return db.meta0 + } + return db.meta1 +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + buf := make([]byte, count*db.pageSize) + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. 
+type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = other.TxN - s.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } else if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. 
+func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go new file mode 100644 index 000000000..dddf22b46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go @@ -0,0 +1,903 @@ +package bolt_test + +import ( + "encoding/binary" + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "regexp" + "runtime" + "sort" + "strings" + "testing" + "time" + + "github.com/boltdb/bolt" +) + +var statsFlag = flag.Bool("stats", false, "show performance stats") + +// Ensure that opening a database with a bad path returns an error. 
+func TestOpen_BadPath(t *testing.T) { + db, err := bolt.Open("", 0666, nil) + assert(t, err != nil, "err: %s", err) + assert(t, db == nil, "") +} + +// Ensure that a database can be opened without error. +func TestOpen(t *testing.T) { + path := tempfile() + defer os.Remove(path) + db, err := bolt.Open(path, 0666, nil) + assert(t, db != nil, "") + ok(t, err) + equals(t, db.Path(), path) + ok(t, db.Close()) +} + +// Ensure that opening an already open database file will timeout. +func TestOpen_Timeout(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("timeout not supported on windows") + } + + path := tempfile() + defer os.Remove(path) + + // Open a data file. + db0, err := bolt.Open(path, 0666, nil) + assert(t, db0 != nil, "") + ok(t, err) + + // Attempt to open the database again. + start := time.Now() + db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) + assert(t, db1 == nil, "") + equals(t, bolt.ErrTimeout, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") + + db0.Close() +} + +// Ensure that opening an already open database file will wait until its closed. +func TestOpen_Wait(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("timeout not supported on windows") + } + + path := tempfile() + defer os.Remove(path) + + // Open a data file. + db0, err := bolt.Open(path, 0666, nil) + assert(t, db0 != nil, "") + ok(t, err) + + // Close it in just a bit. + time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) + + // Attempt to open the database again. + start := time.Now() + db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) + assert(t, db1 != nil, "") + ok(t, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") +} + +// Ensure that opening a database does not increase its size. +// https://github.com/boltdb/bolt/issues/291 +func TestOpen_Size(t *testing.T) { + // Open a data file. 
+ db := NewTestDB() + path := db.Path() + defer db.Close() + + // Insert until we get above the minimum 4MB size. + ok(t, db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for i := 0; i < 10000; i++ { + ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000))) + } + return nil + })) + + // Close database and grab the size. + db.DB.Close() + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } + + // Reopen database, update, and check size again. + db0, err := bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) + ok(t, db0.Close()) + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + if sz != newSz { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that opening a database beyond the max step size does not increase its size. +// https://github.com/boltdb/bolt/issues/303 +func TestOpen_Size_Large(t *testing.T) { + if testing.Short() { + t.Skip("short mode") + } + + // Open a data file. + db := NewTestDB() + path := db.Path() + defer db.Close() + + // Insert until we get above the minimum 4MB size. + var index uint64 + for i := 0; i < 10000; i++ { + ok(t, db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for j := 0; j < 1000; j++ { + ok(t, b.Put(u64tob(index), make([]byte, 50))) + index++ + } + return nil + })) + } + + // Close database and grab the size. + db.DB.Close() + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } else if sz < (1 << 30) { + t.Fatalf("expected larger initial size: %d", sz) + } + + // Reopen database, update, and check size again. 
+ db0, err := bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) + ok(t, db0.Close()) + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + if sz != newSz { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that a re-opened database is consistent. +func TestOpen_Check(t *testing.T) { + path := tempfile() + defer os.Remove(path) + + db, err := bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + db.Close() + + db, err = bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + db.Close() +} + +// Ensure that the database returns an error if the file handle cannot be open. +func TestDB_Open_FileError(t *testing.T) { + path := tempfile() + defer os.Remove(path) + + _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) + assert(t, err.(*os.PathError) != nil, "") + equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path) + equals(t, "open", err.(*os.PathError).Op) +} + +// Ensure that write errors to the meta file handler during initialization are returned. +func TestDB_Open_MetaInitWriteError(t *testing.T) { + t.Skip("pending") +} + +// Ensure that a database that is too small returns an error. +func TestDB_Open_FileTooSmall(t *testing.T) { + path := tempfile() + defer os.Remove(path) + + db, err := bolt.Open(path, 0666, nil) + ok(t, err) + db.Close() + + // corrupt the database + ok(t, os.Truncate(path, int64(os.Getpagesize()))) + + db, err = bolt.Open(path, 0666, nil) + equals(t, errors.New("file size too small"), err) +} + +// Ensure that a database can be opened in read-only mode by multiple processes +// and that a database can not be opened in read-write mode and in read-only +// mode at the same time. 
+func TestOpen_ReadOnly(t *testing.T) { + bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) + + path := tempfile() + defer os.Remove(path) + + // Open in read-write mode. + db, err := bolt.Open(path, 0666, nil) + ok(t, db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket(bucket) + if err != nil { + return err + } + return b.Put(key, value) + })) + assert(t, db != nil, "") + assert(t, !db.IsReadOnly(), "") + ok(t, err) + ok(t, db.Close()) + + // Open in read-only mode. + db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + ok(t, err) + defer db0.Close() + + // Opening in read-write mode should return an error. + _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}) + assert(t, err != nil, "") + + // And again (in read-only mode). + db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + ok(t, err) + defer db1.Close() + + // Verify both read-only databases are accessible. + for _, db := range []*bolt.DB{db0, db1} { + // Verify is is in read only mode indeed. + assert(t, db.IsReadOnly(), "") + + // Read-only databases should not allow updates. + assert(t, + bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error { + panic(`should never get here`) + }), + "") + + // Read-only databases should not allow beginning writable txns. + _, err = db.Begin(true) + assert(t, bolt.ErrDatabaseReadOnly == err, "") + + // Verify the data. + ok(t, db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) + if b == nil { + return fmt.Errorf("expected bucket `%s`", string(bucket)) + } + + got := string(b.Get(key)) + expected := string(value) + if got != expected { + return fmt.Errorf("expected `%s`, got `%s`", expected, got) + } + return nil + })) + } +} + +// TODO(benbjohnson): Test corruption at every byte of the first two pages. + +// Ensure that a database cannot open a transaction when it's not open. 
+func TestDB_Begin_DatabaseNotOpen(t *testing.T) { + var db bolt.DB + tx, err := db.Begin(false) + assert(t, tx == nil, "") + equals(t, err, bolt.ErrDatabaseNotOpen) +} + +// Ensure that a read-write transaction can be retrieved. +func TestDB_BeginRW(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, err := db.Begin(true) + assert(t, tx != nil, "") + ok(t, err) + assert(t, tx.DB() == db.DB, "") + equals(t, tx.Writable(), true) + ok(t, tx.Commit()) +} + +// Ensure that opening a transaction while the DB is closed returns an error. +func TestDB_BeginRW_Closed(t *testing.T) { + var db bolt.DB + tx, err := db.Begin(true) + equals(t, err, bolt.ErrDatabaseNotOpen) + assert(t, tx == nil, "") +} + +func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } +func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } + +// Ensure that a database cannot close while transactions are open. +func testDB_Close_PendingTx(t *testing.T, writable bool) { + db := NewTestDB() + defer db.Close() + + // Start transaction. + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + // Open update in separate goroutine. + done := make(chan struct{}) + go func() { + db.Close() + close(done) + }() + + // Ensure database hasn't closed. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + t.Fatal("database closed too early") + default: + } + + // Commit transaction. + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Ensure database closed now. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + default: + t.Fatal("database did not close") + } +} + +// Ensure a database can provide a transactional block. 
+func TestDB_Update(t *testing.T) { + db := NewTestDB() + defer db.Close() + err := db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("foo"), []byte("bar")) + b.Put([]byte("baz"), []byte("bat")) + b.Delete([]byte("foo")) + return nil + }) + ok(t, err) + err = db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) + return nil + }) + ok(t, err) +} + +// Ensure a closed database returns an error while running a transaction block +func TestDB_Update_Closed(t *testing.T) { + var db bolt.DB + err := db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + equals(t, err, bolt.ErrDatabaseNotOpen) +} + +// Ensure a panic occurs while trying to commit a managed transaction. +func TestDB_Update_ManualCommit(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Commit() + }() + return nil + }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_Update_ManualRollback(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Rollback() + }() + return nil + }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to commit a managed transaction. 
+func TestDB_View_ManualCommit(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Commit() + }() + return nil + }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_View_ManualRollback(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Rollback() + }() + return nil + }) + assert(t, ok, "expected panic") +} + +// Ensure a write transaction that panics does not hold open locks. +func TestDB_Update_Panic(t *testing.T) { + db := NewTestDB() + defer db.Close() + + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: update", r) + } + }() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + panic("omg") + }) + }() + + // Verify we can update again. + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + ok(t, err) + + // Verify that our change persisted. + err = db.Update(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + return nil + }) +} + +// Ensure a database can return an error through a read-only transactional block. +func TestDB_View_Error(t *testing.T) { + db := NewTestDB() + defer db.Close() + err := db.View(func(tx *bolt.Tx) error { + return errors.New("xxx") + }) + equals(t, errors.New("xxx"), err) +} + +// Ensure a read transaction that panics does not hold open locks. 
+func TestDB_View_Panic(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: view", r) + } + }() + db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + panic("omg") + }) + }() + + // Verify that we can still use read transactions. + db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + return nil + }) +} + +// Ensure that an error is returned when a database write fails. +func TestDB_Commit_WriteFail(t *testing.T) { + t.Skip("pending") // TODO(benbjohnson) +} + +// Ensure that DB stats can be returned. +func TestDB_Stats(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + stats := db.Stats() + equals(t, 2, stats.TxStats.PageCount) + equals(t, 0, stats.FreePageN) + equals(t, 2, stats.PendingPageN) +} + +// Ensure that database pages are in expected order and type. 
+func TestDB_Consistency(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + + for i := 0; i < 10; i++ { + db.Update(func(tx *bolt.Tx) error { + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + return nil + }) + } + db.Update(func(tx *bolt.Tx) error { + p, _ := tx.Page(0) + assert(t, p != nil, "") + equals(t, "meta", p.Type) + + p, _ = tx.Page(1) + assert(t, p != nil, "") + equals(t, "meta", p.Type) + + p, _ = tx.Page(2) + assert(t, p != nil, "") + equals(t, "free", p.Type) + + p, _ = tx.Page(3) + assert(t, p != nil, "") + equals(t, "free", p.Type) + + p, _ = tx.Page(4) + assert(t, p != nil, "") + equals(t, "leaf", p.Type) + + p, _ = tx.Page(5) + assert(t, p != nil, "") + equals(t, "freelist", p.Type) + + p, _ = tx.Page(6) + assert(t, p == nil, "") + return nil + }) +} + +// Ensure that DB stats can be substracted from one another. +func TestDBStats_Sub(t *testing.T) { + var a, b bolt.Stats + a.TxStats.PageCount = 3 + a.FreePageN = 4 + b.TxStats.PageCount = 10 + b.FreePageN = 14 + diff := b.Sub(&a) + equals(t, 7, diff.TxStats.PageCount) + // free page stats are copied from the receiver and not subtracted + equals(t, 14, diff.FreePageN) +} + +func ExampleDB_Update() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Execute several commands within a write transaction. + err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }) + + // If our transactional block didn't return an error then our data is saved. 
+ if err == nil { + db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }) + } + + // Output: + // The value of 'foo' is: bar +} + +func ExampleDB_View() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Insert data into a bucket. + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("people")) + b := tx.Bucket([]byte("people")) + b.Put([]byte("john"), []byte("doe")) + b.Put([]byte("susy"), []byte("que")) + return nil + }) + + // Access data from within a read-only transactional block. + db.View(func(tx *bolt.Tx) error { + v := tx.Bucket([]byte("people")).Get([]byte("john")) + fmt.Printf("John's last name is %s.\n", v) + return nil + }) + + // Output: + // John's last name is doe. +} + +func ExampleDB_Begin_ReadOnly() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + + // Create several keys in a transaction. + tx, _ := db.Begin(true) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("john"), []byte("blue")) + b.Put([]byte("abby"), []byte("red")) + b.Put([]byte("zephyr"), []byte("purple")) + tx.Commit() + + // Iterate over the values in sorted key order. + tx, _ = db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("%s likes %s\n", k, v) + } + tx.Rollback() + + // Output: + // abby likes red + // john likes blue + // zephyr likes purple +} + +// TestDB represents a wrapper around a Bolt DB to handle temporary file +// creation and automatic cleanup on close. +type TestDB struct { + *bolt.DB +} + +// NewTestDB returns a new instance of TestDB. 
+func NewTestDB() *TestDB { + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + panic("cannot open db: " + err.Error()) + } + return &TestDB{db} +} + +// MustView executes a read-only function. Panic on error. +func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) { + if err := db.DB.View(func(tx *bolt.Tx) error { + return fn(tx) + }); err != nil { + panic(err.Error()) + } +} + +// MustUpdate executes a read-write function. Panic on error. +func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) { + if err := db.DB.View(func(tx *bolt.Tx) error { + return fn(tx) + }); err != nil { + panic(err.Error()) + } +} + +// MustCreateBucket creates a new bucket. Panic on error. +func (db *TestDB) MustCreateBucket(name []byte) { + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte(name)) + return err + }); err != nil { + panic(err.Error()) + } +} + +// Close closes the database and deletes the underlying file. +func (db *TestDB) Close() { + // Log statistics. + if *statsFlag { + db.PrintStats() + } + + // Check database consistency after every test. + db.MustCheck() + + // Close database and remove file. + defer os.Remove(db.Path()) + db.DB.Close() +} + +// PrintStats prints the database stats +func (db *TestDB) PrintStats() { + var stats = db.Stats() + fmt.Printf("[db] %-20s %-20s %-20s\n", + fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), + fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), + fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), + ) + fmt.Printf(" %-20s %-20s %-20s\n", + fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), + fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), + fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), + ) +} + +// MustCheck runs a consistency check on the database and panics if any errors are found. 
+func (db *TestDB) MustCheck() { + db.Update(func(tx *bolt.Tx) error { + // Collect all the errors. + var errors []error + for err := range tx.Check() { + errors = append(errors, err) + if len(errors) > 10 { + break + } + } + + // If errors occurred, copy the DB and print the errors. + if len(errors) > 0 { + var path = tempfile() + tx.CopyFile(path, 0600) + + // Print errors. + fmt.Print("\n\n") + fmt.Printf("consistency check failed (%d errors)\n", len(errors)) + for _, err := range errors { + fmt.Println(err) + } + fmt.Println("") + fmt.Println("db saved to:") + fmt.Println(path) + fmt.Print("\n\n") + os.Exit(-1) + } + + return nil + }) +} + +// CopyTempFile copies a database to a temporary file. +func (db *TestDB) CopyTempFile() { + path := tempfile() + db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) + fmt.Println("db copied to: ", path) +} + +// tempfile returns a temporary file path. +func tempfile() string { + f, _ := ioutil.TempFile("", "bolt-") + f.Close() + os.Remove(f.Name()) + return f.Name() +} + +// mustContainKeys checks that a bucket contains a given set of keys. +func mustContainKeys(b *bolt.Bucket, m map[string]string) { + found := make(map[string]string) + b.ForEach(func(k, _ []byte) error { + found[string(k)] = "" + return nil + }) + + // Check for keys found in bucket that shouldn't be there. + var keys []string + for k, _ := range found { + if _, ok := m[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) + } + + // Check for keys not found in bucket that should be there. 
+ for k, _ := range m { + if _, ok := found[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) + } +} + +func trunc(b []byte, length int) []byte { + if length < len(b) { + return b[:length] + } + return b +} + +func truncDuration(d time.Duration) string { + return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") +} + +func fileSize(path string) int64 { + fi, err := os.Stat(path) + if err != nil { + return 0 + } + return fi.Size() +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +// u64tob converts a uint64 into an 8-byte slice. +func u64tob(v uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b +} + +// btou64 converts an 8-byte slice into an uint64. +func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go b/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go new file mode 100644 index 000000000..cc937845d --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. 
+ + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go b/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go new file mode 100644 index 000000000..6883786d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go @@ -0,0 +1,70 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when a data file is not a Bolt-formatted database. 
+ ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go new file mode 100644 index 000000000..0161948fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go @@ -0,0 +1,242 @@ +package bolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + ids []pgid // all free and available free page ids. + pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + pending: make(map[txid][]pgid), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, list := range f.pending { + count += len(list) + } + return count +} + +// all returns a list of all free ids and all pending ids in one sorted list. 
+func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...) + } + + sort.Sort(m) + return pgids(f.ids).merge(m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. 
+func (f *freelist) release(txid txid) {
+	m := make(pgids, 0)
+	for tid, ids := range f.pending {
+		if tid <= txid {
+			// Move transaction's pending pages to the available freelist.
+			// Don't remove from the cache since the page is still free.
+			m = append(m, ids...)
+			delete(f.pending, tid)
+		}
+	}
+	sort.Sort(m)
+	f.ids = pgids(f.ids).merge(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+	// Remove page ids from cache.
+	for _, id := range f.pending[txid] {
+		delete(f.cache, id)
+	}
+
+	// Remove pages from pending list.
+	delete(f.pending, txid)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+	return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	idx, count := 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+	}
+
+	// Copy the list of page ids from the freelist.
+	// The ids occupy elements [idx : idx+count]: in the overflow layout the
+	// size sentinel sits at element 0 and the ids start at element 1, so
+	// slicing [idx:count] would drop the final page id whenever idx == 1.
+	ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+	f.ids = make([]pgid, len(ids))
+	copy(f.ids, ids)
+
+	// Make sure they're sorted.
+	sort.Sort(pgids(f.ids))
+
+	// Rebuild the page cache.
+	f.reindex()
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+	// Combine the old free pgids and pgids waiting on an open transaction.
+	ids := f.all()
+
+	// Update the header flag.
+	p.flags |= freelistPageFlag
+
+	// The page.count can only hold up to 64k elements so if we overflow that
+	// number then we handle it by putting the size in the first element.
+ if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go new file mode 100644 index 000000000..8caeab2ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go @@ -0,0 +1,156 @@ +package bolt + +import ( + "math/rand" + "reflect" + "sort" + "testing" + "unsafe" +) + +// Ensure that a page is added to a transaction's freelist. 
+func TestFreelist_free(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12}) + if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) + } +} + +// Ensure that a page and its overflow is added to a transaction's freelist. +func TestFreelist_free_overflow(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 3}) + if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) + } +} + +// Ensure that a transaction's free pages can be released. +func TestFreelist_release(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 1}) + f.free(100, &page{id: 9}) + f.free(102, &page{id: 39}) + f.release(100) + f.release(101) + if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + f.release(102) + if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can find contiguous blocks of pages. 
+func TestFreelist_allocate(t *testing.T) { + f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} + if id := int(f.allocate(3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.allocate(1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.allocate(3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.allocate(1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + if id := int(f.allocate(1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.allocate(1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.allocate(1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can deserialize from a freelist page. +func TestFreelist_read(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = freelistPageFlag + page.count = 2 + + // Insert 2 page ids. + ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) + ids[0] = 23 + ids[1] = 50 + + // Deserialize page into a freelist. + f := newFreelist() + f.read(page) + + // Ensure that there are two page ids in the freelist. + if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can serialize into a freelist page. +func TestFreelist_write(t *testing.T) { + // Create a freelist and write it to a page. 
+ var buf [4096]byte + f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} + f.pending[100] = []pgid{28, 11} + f.pending[101] = []pgid{3} + p := (*page)(unsafe.Pointer(&buf[0])) + f.write(p) + + // Read the page back out. + f2 := newFreelist() + f2.read(p) + + // Ensure that the freelist is correct. + // All pages should be present and in reverse order. + if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { + t.Fatalf("exp=%v; got=%v", exp, f2.ids) + } +} + +func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } +func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } +func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } +func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } + +func benchmark_FreelistRelease(b *testing.B, size int) { + ids := randomPgids(size) + pending := randomPgids(len(ids) / 400) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} + f.release(1) + } +} + +func randomPgids(n int) []pgid { + rand.Seed(42) + pgids := make(pgids, n) + for i := range pgids { + pgids[i] = pgid(rand.Int63()) + } + sort.Sort(pgids) + return pgids +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node.go new file mode 100644 index 000000000..c9fb21c73 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/node.go @@ -0,0 +1,636 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. 
+func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. 
+func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. 
+func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. 
+ // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. 
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. 
+ if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. 
+ for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If target node has extra nodes then just move one over. + if target.numChildren() > target.minKeys() { + if useNextSibling { + // Reparent and move node. + if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + n.inodes = append(n.inodes, target.inodes[0]) + target.inodes = target.inodes[1:] + + // Update target key on parent. + target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) + target.key = target.inodes[0].key + _assert(len(target.key) > 0, "rebalance(1): zero-length node key") + } else { + // Reparent and move node. + if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[1:], n.inodes) + n.inodes[0] = target.inodes[len(target.inodes)-1] + target.inodes = target.inodes[:len(target.inodes)-1] + } + + // Update parent key for node. 
+ n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "rebalance(2): zero-length node key") + + return + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
+func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. 
+type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go new file mode 100644 index 000000000..fa5d10f99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go @@ -0,0 +1,156 @@ +package bolt + +import ( + "testing" + "unsafe" +) + +// Ensure that a node can insert a key/value. +func TestNode_put(t *testing.T) { + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} + n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) + n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) + + if len(n.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if n.inodes[2].flags != uint32(leafPageFlag) { + t.Fatalf("not a leaf: %d", n.inodes[2].flags) + } +} + +// Ensure that a node can deserialize from a leaf page. +func TestNode_read_LeafPage(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = leafPageFlag + page.count = 2 + + // Insert 2 elements at the beginning. 
sizeof(leafPageElement) == 16 + nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) + nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 + nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 + + // Write data for the nodes at the end. + data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) + copy(data[:], []byte("barfooz")) + copy(data[7:], []byte("helloworldbye")) + + // Deserialize page into a leaf. + n := &node{} + n.read(page) + + // Check that there are two inodes with correct data. + if !n.isLeaf { + t.Fatal("expected leaf") + } + if len(n.inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can serialize into a leaf page. +func TestNode_write_LeafPage(t *testing.T) { + // Create a node. + n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) + n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) + n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) + + // Write it to a page. + var buf [4096]byte + p := (*page)(unsafe.Pointer(&buf[0])) + n.write(p) + + // Read the page back in. + n2 := &node{} + n2.read(p) + + // Check that the two pages are the same. 
+ if len(n2.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n2.inodes)) + } + if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can split into appropriate subgroups. +func TestNode_split(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split between 2 & 3. + n.split(100) + + var parent = n.parent + if len(parent.children) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children)) + } + if len(parent.children[0].inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) + } + if len(parent.children[1].inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) + } +} + +// Ensure that a page with the minimum number of inodes just returns a single node. +func TestNode_split_MinKeys(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + + // Split. 
+ n.split(20) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} + +// Ensure that a node that has keys that all fit on a page just returns one leaf. +func TestNode_split_SinglePage(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split. + n.split(4096) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page.go new file mode 100644 index 000000000..818aa1b15 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/page.go @@ -0,0 +1,172 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. 
+func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. 
+func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) 
+ + return merged +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go new file mode 100644 index 000000000..59f4a30ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go @@ -0,0 +1,72 @@ +package bolt + +import ( + "reflect" + "sort" + "testing" + "testing/quick" +) + +// Ensure that the page type can be returned in human readable format. +func TestPage_typ(t *testing.T) { + if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { + t.Fatalf("exp=branch; got=%v", typ) + } + if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { + t.Fatalf("exp=leaf; got=%v", typ) + } + if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { + t.Fatalf("exp=meta; got=%v", typ) + } + if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { + t.Fatalf("exp=freelist; got=%v", typ) + } + if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { + t.Fatalf("exp=unknown<4e20>; got=%v", typ) + } +} + +// Ensure that the hexdump debugging function doesn't blow up. +func TestPage_dump(t *testing.T) { + (&page{id: 256}).hexdump(16) +} + +func TestPgids_merge(t *testing.T) { + a := pgids{4, 5, 6, 10, 11, 12, 13, 27} + b := pgids{1, 3, 8, 9, 25, 30} + c := a.merge(b) + if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { + t.Errorf("mismatch: %v", c) + } + + a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} + b = pgids{8, 9, 25, 30} + c = a.merge(b) + if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { + t.Errorf("mismatch: %v", c) + } +} + +func TestPgids_merge_quick(t *testing.T) { + if err := quick.Check(func(a, b pgids) bool { + // Sort incoming lists. + sort.Sort(a) + sort.Sort(b) + + // Merge the two lists together. + got := a.merge(b) + + // The expected value should be the two lists combined and sorted. + exp := append(a, b...) 
+ sort.Sort(exp) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) + return false + } + + return true + }, nil); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go new file mode 100644 index 000000000..4da581775 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go @@ -0,0 +1,79 @@ +package bolt_test + +import ( + "bytes" + "flag" + "fmt" + "math/rand" + "os" + "reflect" + "testing/quick" + "time" +) + +// testing/quick defaults to 5 iterations and a random seed. +// You can override these settings from the command line: +// +// -quick.count The number of iterations to perform. +// -quick.seed The seed to use for randomizing. +// -quick.maxitems The maximum number of items to insert into a DB. +// -quick.maxksize The maximum size of a key. +// -quick.maxvsize The maximum size of a value. +// + +var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int + +func init() { + flag.IntVar(&qcount, "quick.count", 5, "") + flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") + flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") + flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") + flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") + flag.Parse() + fmt.Fprintln(os.Stderr, "seed:", qseed) + fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) +} + +func qconfig() *quick.Config { + return &quick.Config{ + MaxCount: qcount, + Rand: rand.New(rand.NewSource(int64(qseed))), + } +} + +type testdata []testdataitem + +func (t testdata) Len() int { return len(t) } +func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } + +func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { + n := rand.Intn(qmaxitems-1) + 1 + 
items := make(testdata, n) + for i := 0; i < n; i++ { + item := &items[i] + item.Key = randByteSlice(rand, 1, qmaxksize) + item.Value = randByteSlice(rand, 0, qmaxvsize) + } + return reflect.ValueOf(items) +} + +type revtestdata []testdataitem + +func (t revtestdata) Len() int { return len(t) } +func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } + +type testdataitem struct { + Key []byte + Value []byte +} + +func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { + n := rand.Intn(maxSize-minSize) + minSize + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go new file mode 100644 index 000000000..ceb8baef0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go @@ -0,0 +1,327 @@ +package bolt_test + +import ( + "bytes" + "fmt" + "math/rand" + "sync" + "testing" + + "github.com/boltdb/bolt" +) + +func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } +func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } +func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } +func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } +func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } + +func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } +func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } +func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } +func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } + +func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } +func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } +func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 
10000, 100) } + +func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } + +// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. +func testSimulate(t *testing.T, threadCount, parallelism int) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + rand.Seed(int64(qseed)) + + // A list of operations that readers and writers can perform. + var readerHandlers = []simulateHandler{simulateGetHandler} + var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} + + var versions = make(map[int]*QuickDB) + versions[1] = NewQuickDB() + + db := NewTestDB() + defer db.Close() + + var mutex sync.Mutex + + // Run n threads in parallel, each with their own operation. + var wg sync.WaitGroup + var threads = make(chan bool, parallelism) + var i int + for { + threads <- true + wg.Add(1) + writable := ((rand.Int() % 100) < 20) // 20% writers + + // Choose an operation to execute. + var handler simulateHandler + if writable { + handler = writerHandlers[rand.Intn(len(writerHandlers))] + } else { + handler = readerHandlers[rand.Intn(len(readerHandlers))] + } + + // Execute a thread for the given operation. + go func(writable bool, handler simulateHandler) { + defer wg.Done() + + // Start transaction. + tx, err := db.Begin(writable) + if err != nil { + t.Fatal("tx begin: ", err) + } + + // Obtain current state of the dataset. + mutex.Lock() + var qdb = versions[tx.ID()] + if writable { + qdb = versions[tx.ID()-1].Copy() + } + mutex.Unlock() + + // Make sure we commit/rollback the tx at the end and update the state. + if writable { + defer func() { + mutex.Lock() + versions[tx.ID()] = qdb + mutex.Unlock() + + ok(t, tx.Commit()) + }() + } else { + defer tx.Rollback() + } + + // Ignore operation if we don't have data yet. + if qdb == nil { + return + } + + // Execute handler. + handler(tx, qdb) + + // Release a thread back to the scheduling loop. 
+ <-threads + }(writable, handler) + + i++ + if i > threadCount { + break + } + } + + // Wait until all threads are done. + wg.Wait() +} + +type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) + +// Retrieves a key from the database and verifies that it is what is expected. +func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { + // Randomly retrieve an existing exist. + keys := qdb.Rand() + if len(keys) == 0 { + return + } + + // Retrieve root bucket. + b := tx.Bucket(keys[0]) + if b == nil { + panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) + } + + // Drill into nested buckets. + for _, key := range keys[1 : len(keys)-1] { + b = b.Bucket(key) + if b == nil { + panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) + } + } + + // Verify key/value on the final bucket. + expected := qdb.Get(keys) + actual := b.Get(keys[len(keys)-1]) + if !bytes.Equal(actual, expected) { + fmt.Println("=== EXPECTED ===") + fmt.Println(expected) + fmt.Println("=== ACTUAL ===") + fmt.Println(actual) + fmt.Println("=== END ===") + panic("value mismatch") + } +} + +// Inserts a key into the database. +func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { + var err error + keys, value := randKeys(), randValue() + + // Retrieve root bucket. + b := tx.Bucket(keys[0]) + if b == nil { + b, err = tx.CreateBucket(keys[0]) + if err != nil { + panic("create bucket: " + err.Error()) + } + } + + // Create nested buckets, if necessary. + for _, key := range keys[1 : len(keys)-1] { + child := b.Bucket(key) + if child != nil { + b = child + } else { + b, err = b.CreateBucket(key) + if err != nil { + panic("create bucket: " + err.Error()) + } + } + } + + // Insert into database. + if err := b.Put(keys[len(keys)-1], value); err != nil { + panic("put: " + err.Error()) + } + + // Insert into in-memory database. + qdb.Put(keys, value) +} + +// QuickDB is an in-memory database that replicates the functionality of the +// Bolt DB type except that it is entirely in-memory. 
It is meant for testing +// that the Bolt database is consistent. +type QuickDB struct { + sync.RWMutex + m map[string]interface{} +} + +// NewQuickDB returns an instance of QuickDB. +func NewQuickDB() *QuickDB { + return &QuickDB{m: make(map[string]interface{})} +} + +// Get retrieves the value at a key path. +func (db *QuickDB) Get(keys [][]byte) []byte { + db.RLock() + defer db.RUnlock() + + m := db.m + for _, key := range keys[:len(keys)-1] { + value := m[string(key)] + if value == nil { + return nil + } + switch value := value.(type) { + case map[string]interface{}: + m = value + case []byte: + return nil + } + } + + // Only return if it's a simple value. + if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { + return value + } + return nil +} + +// Put inserts a value into a key path. +func (db *QuickDB) Put(keys [][]byte, value []byte) { + db.Lock() + defer db.Unlock() + + // Build buckets all the way down the key path. + m := db.m + for _, key := range keys[:len(keys)-1] { + if _, ok := m[string(key)].([]byte); ok { + return // Keypath intersects with a simple value. Do nothing. + } + + if m[string(key)] == nil { + m[string(key)] = make(map[string]interface{}) + } + m = m[string(key)].(map[string]interface{}) + } + + // Insert value into the last key. + m[string(keys[len(keys)-1])] = value +} + +// Rand returns a random key path that points to a simple value. +func (db *QuickDB) Rand() [][]byte { + db.RLock() + defer db.RUnlock() + if len(db.m) == 0 { + return nil + } + var keys [][]byte + db.rand(db.m, &keys) + return keys +} + +func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { + i, index := 0, rand.Intn(len(m)) + for k, v := range m { + if i == index { + *keys = append(*keys, []byte(k)) + if v, ok := v.(map[string]interface{}); ok { + db.rand(v, keys) + } + return + } + i++ + } + panic("quickdb rand: out-of-range") +} + +// Copy copies the entire database. 
+func (db *QuickDB) Copy() *QuickDB { + db.RLock() + defer db.RUnlock() + return &QuickDB{m: db.copy(db.m)} +} + +func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { + clone := make(map[string]interface{}, len(m)) + for k, v := range m { + switch v := v.(type) { + case map[string]interface{}: + clone[k] = db.copy(v) + default: + clone[k] = v + } + } + return clone +} + +func randKey() []byte { + var min, max = 1, 1024 + n := rand.Intn(max-min) + min + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} + +func randKeys() [][]byte { + var keys [][]byte + var count = rand.Intn(2) + 2 + for i := 0; i < count; i++ { + keys = append(keys, randKey()) + } + return keys +} + +func randValue() []byte { + n := rand.Intn(8192) + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go new file mode 100644 index 000000000..6b52b2c89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go @@ -0,0 +1,611 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. 
+type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. 
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. 
+ startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + if err, ok := <-tx.Check(); ok { + panic("check fail: " + err.Error()) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. 
+ var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove writer lock. + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + tx.db = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() in +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader directly. + var f *os.File + if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { + // Fallback to a regular open if that doesn't work. + if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { + return 0, err + } + } + + // Copy the meta pages. + tx.db.metalock.Lock() + n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) + tx.db.metalock.Unlock() + if err != nil { + _ = f.Close() + return n, fmt.Errorf("meta copy: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + _ = f.Close() + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. 
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. 
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. 
+ buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Clear out page cache. + tx.pages = make(map[pgid]*page) + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary bufferred page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. 
+// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. + CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. 
+// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go new file mode 100644 index 000000000..6c8271a60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go @@ -0,0 +1,456 @@ +package bolt_test + +import ( + "errors" + "fmt" + "os" + "testing" + + "github.com/boltdb/bolt" +) + +// Ensure that committing a closed transaction returns an error. +func TestTx_Commit_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("foo")) + ok(t, tx.Commit()) + equals(t, tx.Commit(), bolt.ErrTxClosed) +} + +// Ensure that rolling back a closed transaction returns an error. +func TestTx_Rollback_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + ok(t, tx.Rollback()) + equals(t, tx.Rollback(), bolt.ErrTxClosed) +} + +// Ensure that committing a read-only transaction returns an error. 
+func TestTx_Commit_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(false) + equals(t, tx.Commit(), bolt.ErrTxNotWritable) +} + +// Ensure that a transaction can retrieve a cursor on the root bucket. +func TestTx_Cursor(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.CreateBucket([]byte("woojits")) + c := tx.Cursor() + + k, v := c.First() + equals(t, "widgets", string(k)) + assert(t, v == nil, "") + + k, v = c.Next() + equals(t, "woojits", string(k)) + assert(t, v == nil, "") + + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") + + return nil + }) +} + +// Ensure that creating a bucket with a read-only transaction returns an error. +func TestTx_CreateBucket_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.View(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + assert(t, b == nil, "") + equals(t, bolt.ErrTxNotWritable, err) + return nil + }) +} + +// Ensure that creating a bucket on a closed transaction returns an error. +func TestTx_CreateBucket_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.Commit() + b, err := tx.CreateBucket([]byte("foo")) + assert(t, b == nil, "") + equals(t, bolt.ErrTxClosed, err) +} + +// Ensure that a Tx can retrieve a bucket. +func TestTx_Bucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil + }) +} + +// Ensure that a Tx retrieving a non-existent key returns nil. 
+func TestTx_Get_Missing(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) + assert(t, value == nil, "") + return nil + }) +} + +// Ensure that a bucket can be created and retrieved. +func TestTx_CreateBucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + return nil + }) + + // Read the bucket through a separate transaction. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil + }) +} + +// Ensure that a bucket can be created if it doesn't already exist. +func TestTx_CreateBucketIfNotExists(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + + b, err = tx.CreateBucketIfNotExists([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + + b, err = tx.CreateBucketIfNotExists([]byte{}) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) + + b, err = tx.CreateBucketIfNotExists(nil) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) + return nil + }) + + // Read the bucket through a separate transaction. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil + }) +} + +// Ensure that a bucket cannot be created twice. +func TestTx_CreateBucket_Exists(t *testing.T) { + db := NewTestDB() + defer db.Close() + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + return nil + }) + + // Create the same bucket again. 
+ db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketExists, err) + return nil + }) +} + +// Ensure that a bucket is created with a non-blank name. +func TestTx_CreateBucket_NameRequired(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket(nil) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) + return nil + }) +} + +// Ensure that a bucket can be deleted. +func TestTx_DeleteBucket(t *testing.T) { + db := NewTestDB() + defer db.Close() + + // Create a bucket and add a value. + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + return nil + }) + + // Delete the bucket and make sure we can't get the value. + db.Update(func(tx *bolt.Tx) error { + ok(t, tx.DeleteBucket([]byte("widgets"))) + assert(t, tx.Bucket([]byte("widgets")) == nil, "") + return nil + }) + + db.Update(func(tx *bolt.Tx) error { + // Create the bucket again and make sure there's not a phantom value. + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + return nil + }) +} + +// Ensure that deleting a bucket on a closed transaction returns an error. +func TestTx_DeleteBucket_Closed(t *testing.T) { + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.Commit() + equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) +} + +// Ensure that deleting a bucket with a read-only transaction returns an error. +func TestTx_DeleteBucket_ReadOnly(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.View(func(tx *bolt.Tx) error { + equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) + return nil + }) +} + +// Ensure that nothing happens when deleting a bucket that doesn't exist. 
+func TestTx_DeleteBucket_NotFound(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) + return nil + }) +} + +// Ensure that no error is returned when a tx.ForEach function does not return +// an error. +func TestTx_ForEach_NoError(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + + equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return nil + })) + return nil + }) +} + +// Ensure that an error is returned when a tx.ForEach function returns an error. +func TestTx_ForEach_WithError(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + + err := errors.New("foo") + equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return err + })) + return nil + }) +} + +// Ensure that Tx commit handlers are called after a transaction successfully commits. +func TestTx_OnCommit(t *testing.T) { + var x int + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + equals(t, 3, x) +} + +// Ensure that Tx commit handlers are NOT called after a transaction rolls back. +func TestTx_OnCommit_Rollback(t *testing.T) { + var x int + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + tx.CreateBucket([]byte("widgets")) + return errors.New("rollback this commit") + }) + equals(t, 0, x) +} + +// Ensure that the database can be copied to a file path. 
+func TestTx_CopyFile(t *testing.T) { + db := NewTestDB() + defer db.Close() + var dest = tempfile() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil + }) + + ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) + + db2, err := bolt.Open(dest, 0600, nil) + ok(t, err) + defer db2.Close() + + db2.View(func(tx *bolt.Tx) error { + equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) + return nil + }) +} + +type failWriterError struct{} + +func (failWriterError) Error() string { + return "error injected for tests" +} + +type failWriter struct { + // fail after this many bytes + After int +} + +func (f *failWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > f.After { + n = f.After + err = failWriterError{} + } + f.After -= n + return n, err +} + +// Ensure that Copy handles write errors right. +func TestTx_CopyFile_Error_Meta(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil + }) + + err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) + equals(t, err.Error(), "meta copy: error injected for tests") +} + +// Ensure that Copy handles write errors right. 
+func TestTx_CopyFile_Error_Normal(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil + }) + + err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) + equals(t, err.Error(), "error injected for tests") +} + +func ExampleTx_Rollback() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + + // Set a value for a key. + db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + }) + + // Update the key but rollback the transaction so it never saves. + tx, _ := db.Begin(true) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("foo"), []byte("baz")) + tx.Rollback() + + // Ensure that our original value is still set. + db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' is still: %s\n", value) + return nil + }) + + // Output: + // The value for 'foo' is still: bar +} + +func ExampleTx_CopyFile() { + // Open the database. + db, _ := bolt.Open(tempfile(), 0666, nil) + defer os.Remove(db.Path()) + defer db.Close() + + // Create a bucket and a key. + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + return nil + }) + + // Copy the database to another file. + toFile := tempfile() + db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) + defer os.Remove(toFile) + + // Open the cloned database. + db2, _ := bolt.Open(toFile, 0666, nil) + defer db2.Close() + + // Ensure that the key exists in the copy. 
+ db2.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' in the clone is: %s\n", value) + return nil + }) + + // Output: + // The value for 'foo' in the clone is: bar +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore b/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml b/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml new file mode 100644 index 000000000..ce9cb6233 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml @@ -0,0 +1,2 @@ +language: go +go: 1.3.3 diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE b/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of 
the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md b/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md new file mode 100644 index 000000000..020b8fbf3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md @@ -0,0 +1,116 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## How To + +We define two functions, `Retry()` and `RetryNotify()`. +They receive an `Operation` to execute, a `BackOff` algorithm, +and an optional `Notify` error handler. + +The operation will be executed, and will be retried on failure with delay +as given by the backoff algorithm. The backoff algorithm can also decide when to stop +retrying. +In addition, the notify error handler will be called after each failed attempt, +except for the last time, whose error should be handled by the caller. + +```go +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. 
It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +func Retry(Operation, BackOff) error +func RetryNotify(Operation, BackOff, Notify) +``` + +## Examples + +See more advanced examples in the [godoc][advanced example]. + +### Retry + +Simple retry helper that uses the default exponential backoff algorithm: + +```go +operation := func() error { + // An operation that might fail. + return nil // or return errors.New("some error") +} + +err := Retry(operation, NewExponentialBackOff()) +if err != nil { + // Handle error. + return err +} + +// Operation is successful. +return nil +``` + +### Ticker + +```go +operation := func() error { + // An operation that might fail + return nil // or return errors.New("some error") +} + +b := NewExponentialBackOff() +ticker := NewTicker(b) + +var err error + +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +for range ticker.C { + if err = operation(); err != nil { + log.Println(err, "will retry...") + continue + } + + ticker.Stop() + break +} + +if err != nil { + // Operation has failed. + return err +} + +// Operation is successful. +return nil +``` + +## Getting Started + +```bash +# install +$ go get github.com/cenkalti/backoff + +# test +$ cd $GOPATH/src/github.com/cenkalti/backoff +$ go get -t ./... 
+$ go test -v -cover +``` + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png + +[google-http-java-client]: https://github.com/google/google-http-java-client +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go new file mode 100644 index 000000000..3fe6783b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go @@ -0,0 +1,117 @@ +package backoff + +import ( + "io/ioutil" + "log" + "net/http" + "time" +) + +// This is an example that demonstrates how this package could be used +// to perform various advanced operations. +// +// It executes an HTTP GET request with exponential backoff, +// while errors are logged and failed responses are closed, as required by net/http package. +// +// Note we define a condition function which is used inside the operation to +// determine whether the operation succeeded or failed. +func Example() error { + res, err := GetWithRetry( + "http://localhost:9999", + ErrorIfStatusCodeIsNot(http.StatusOK), + NewExponentialBackOff()) + + if err != nil { + // Close response body of last (failed) attempt. + // The Last attempt isn't handled by the notify-on-error function, + // which closes the body of all the previous attempts. + if e := res.Body.Close(); e != nil { + log.Printf("error closing last attempt's response body: %s", e) + } + log.Printf("too many failed request attempts: %s", err) + return err + } + defer res.Body.Close() // The response's Body must be closed. 
+ + // Read body + _, _ = ioutil.ReadAll(res.Body) + + // Do more stuff + return nil +} + +// GetWithRetry is a helper function that performs an HTTP GET request +// to the given URL, and retries with the given backoff using the given condition function. +// +// It also uses a notify-on-error function which logs +// and closes the response body of the failed request. +func GetWithRetry(url string, condition Condition, bck BackOff) (*http.Response, error) { + var res *http.Response + err := RetryNotify( + func() error { + var err error + res, err = http.Get(url) + if err != nil { + return err + } + return condition(res) + }, + bck, + LogAndClose()) + + return res, err +} + +// Condition is a retry condition function. +// It receives a response, and returns an error +// if the response failed the condition. +type Condition func(*http.Response) error + +// ErrorIfStatusCodeIsNot returns a retry condition function. +// The condition returns an error +// if the given response's status code is not the given HTTP status code. +func ErrorIfStatusCodeIsNot(status int) Condition { + return func(res *http.Response) error { + if res.StatusCode != status { + return NewError(res) + } + return nil + } +} + +// Error is returned on ErrorIfX() condition functions throughout this package. +type Error struct { + Response *http.Response +} + +func NewError(res *http.Response) *Error { + // Sanity check + if res == nil { + panic("response object is nil") + } + return &Error{Response: res} +} +func (err *Error) Error() string { return "request failed" } + +// LogAndClose is a notify-on-error function. +// It logs the error and closes the response body. 
+func LogAndClose() Notify { + return func(err error, wait time.Duration) { + switch e := err.(type) { + case *Error: + defer e.Response.Body.Close() + + b, err := ioutil.ReadAll(e.Response.Body) + var body string + if err != nil { + body = "can't read body" + } else { + body = string(b) + } + + log.Printf("%s: %s", e.Response.Status, body) + default: + log.Println(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go new file mode 100644 index 000000000..61bd6df66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go @@ -0,0 +1,59 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Also has a Retry() helper for retrying operations that may fail. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. 
+type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go new file mode 100644 index 000000000..91f27c4f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go @@ -0,0 +1,27 @@ +package backoff + +import ( + "testing" + "time" +) + +func TestNextBackOffMillis(t *testing.T) { + subtestNextBackOff(t, 0, new(ZeroBackOff)) + subtestNextBackOff(t, Stop, new(StopBackOff)) +} + +func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) { + for i := 0; i < 10; i++ { + next := backOffPolicy.NextBackOff() + if next != expectedValue { + t.Errorf("got: %d expected: %d", next, expectedValue) + } + } +} + +func TestConstantBackOff(t *testing.T) { + backoff := NewConstantBackOff(time.Second) + if backoff.NextBackOff() != time.Second { + t.Error("invalid interval") + } +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go new file mode 100644 index 000000000..0d1852e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go @@ -0,0 +1,51 @@ +package backoff + +import "log" + +func ExampleRetry() error { + operation := func() error { + // An operation that might fail. 
+ return nil // or return errors.New("some error") + } + + err := Retry(operation, NewExponentialBackOff()) + if err != nil { + // Handle error. + return err + } + + // Operation is successful. + return nil +} + +func ExampleTicker() error { + operation := func() error { + // An operation that might fail + return nil // or return errors.New("some error") + } + + b := NewExponentialBackOff() + ticker := NewTicker(b) + + var err error + + // Ticks will continue to arrive when the previous operation is still running, + // so operations that take a while to fail could run in quick succession. + for _ = range ticker.C { + if err = operation(); err != nil { + log.Println(err, "will retry...") + continue + } + + ticker.Stop() + break + } + + if err != nil { + // Operation has failed. + return err + } + + // Operation is successful. + return nil +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go new file mode 100644 index 000000000..cc2a164f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go @@ -0,0 +1,151 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. 
+ +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
+func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go new file mode 100644 index 000000000..11b95e4f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go @@ -0,0 +1,108 @@ +package backoff + +import ( + "math" + "testing" + "time" +) + +func TestBackOff(t *testing.T) { + var ( + testInitialInterval = 500 * time.Millisecond + testRandomizationFactor = 0.1 + testMultiplier = 2.0 + testMaxInterval = 5 * time.Second + testMaxElapsedTime = 15 * time.Minute + ) + + exp := NewExponentialBackOff() + exp.InitialInterval = testInitialInterval + exp.RandomizationFactor = testRandomizationFactor + exp.Multiplier = testMultiplier + exp.MaxInterval = testMaxInterval + exp.MaxElapsedTime = testMaxElapsedTime + exp.Reset() + + var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000} + for i, d := range expectedResults { + expectedResults[i] = d * time.Millisecond + } + + for _, 
expected := range expectedResults { + assertEquals(t, expected, exp.currentInterval) + // Assert that the next backoff falls in the expected range. + var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected)) + var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected)) + var actualInterval = exp.NextBackOff() + if !(minInterval <= actualInterval && actualInterval <= maxInterval) { + t.Error("error") + } + } +} + +func TestGetRandomizedInterval(t *testing.T) { + // 33% chance of being 1. + assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2)) + assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2)) + // 33% chance of being 2. + assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2)) + assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2)) + // 33% chance of being 3. + assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2)) + assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2)) +} + +type TestClock struct { + i time.Duration + start time.Time +} + +func (c *TestClock) Now() time.Time { + t := c.start.Add(c.i) + c.i += time.Second + return t +} + +func TestGetElapsedTime(t *testing.T) { + var exp = NewExponentialBackOff() + exp.Clock = &TestClock{} + exp.Reset() + + var elapsedTime = exp.GetElapsedTime() + if elapsedTime != time.Second { + t.Errorf("elapsedTime=%d", elapsedTime) + } +} + +func TestMaxElapsedTime(t *testing.T) { + var exp = NewExponentialBackOff() + exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)} + // Change the currentElapsedTime to be 0 ensuring that the elapsed time will be greater + // than the max elapsed time. 
+ exp.startTime = time.Time{} + assertEquals(t, Stop, exp.NextBackOff()) +} + +func TestBackOffOverflow(t *testing.T) { + var ( + testInitialInterval time.Duration = math.MaxInt64 / 2 + testMaxInterval time.Duration = math.MaxInt64 + testMultiplier = 2.1 + ) + + exp := NewExponentialBackOff() + exp.InitialInterval = testInitialInterval + exp.Multiplier = testMultiplier + exp.MaxInterval = testMaxInterval + exp.Reset() + + exp.NextBackOff() + // Assert that when an overflow is possible the current varerval time.Duration is set to the max varerval time.Duration . + assertEquals(t, testMaxInterval, exp.currentInterval) +} + +func assertEquals(t *testing.T, expected, value time.Duration) { + if expected != value { + t.Errorf("got: %d, expected: %d", value, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go new file mode 100644 index 000000000..f01f2bbd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go @@ -0,0 +1,46 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the function f until it does not return error or BackOff stops. +// f is guaranteed to be run at least once. +// It is the caller's responsibility to reset b after Retry returns. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. 
+func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + time.Sleep(next) + } +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go new file mode 100644 index 000000000..c0d25ab76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go @@ -0,0 +1,34 @@ +package backoff + +import ( + "errors" + "log" + "testing" +) + +func TestRetry(t *testing.T) { + const successOn = 3 + var i = 0 + + // This function is successfull on "successOn" calls. + f := func() error { + i++ + log.Printf("function is called %d. time\n", i) + + if i == successOn { + log.Println("OK") + return nil + } + + log.Println("error") + return errors.New("error") + } + + err := Retry(f, NewExponentialBackOff()) + if err != nil { + t.Errorf("unexpected error: %s", err.Error()) + } + if i != successOn { + t.Errorf("invalid number of retries: %d", i) + } +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go new file mode 100644 index 000000000..7a5ff4ed1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go @@ -0,0 +1,79 @@ +package backoff + +import ( + "runtime" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. 
+type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send the time at times +// specified by the BackOff argument. Ticker is guaranteed to tick at least once. +// The channel is closed when Stop method is called or BackOff stops. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + stop: make(chan struct{}), + } + go t.run() + runtime.SetFinalizer(t, (*Ticker).Stop) + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + t.b.Reset() + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go new file mode 100644 index 000000000..7c392df46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go @@ -0,0 +1,45 @@ +package backoff + +import ( + "errors" + "log" + "testing" +) + +func TestTicker(t *testing.T) { + const successOn = 3 + var i = 0 + + // This function is successfull on "successOn" calls. + f := func() error { + i++ + log.Printf("function is called %d. 
time\n", i) + + if i == successOn { + log.Println("OK") + return nil + } + + log.Println("error") + return errors.New("error") + } + + b := NewExponentialBackOff() + ticker := NewTicker(b) + + var err error + for _ = range ticker.C { + if err = f(); err != nil { + t.Log(err) + continue + } + + break + } + if err != nil { + t.Errorf("unexpected error: %s", err.Error()) + } + if i != successOn { + t.Errorf("invalid number of retries: %d", i) + } +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go new file mode 100644 index 000000000..feb28f2a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go @@ -0,0 +1,258 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that should be recognized for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := getTag(sf) + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with valid tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// valid tags. 
If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go new file mode 100644 index 000000000..f50478abb --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go @@ -0,0 +1,141 @@ +package encoding + +import ( + "errors" + "reflect" + "runtime" + "sync" +) + +var byteSliceType = reflect.TypeOf([]byte(nil)) + +type decoderFunc func(dv reflect.Value, sv reflect.Value) + +// Decode decodes map[string]interface{} into a struct. The first parameter +// must be a pointer. +func Decode(dst interface{}, src interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if v, ok := r.(string); ok { + err = errors.New(v) + } else { + err = r.(error) + } + } + }() + + dv := reflect.ValueOf(dst) + sv := reflect.ValueOf(src) + if dv.Kind() != reflect.Ptr { + return &DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: "must be a pointer", + } + } + + dv = dv.Elem() + if !dv.CanAddr() { + return &DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: "must be addressable", + } + } + + decode(dv, sv) + return nil +} + +// decode decodes the source value into the destination value +func decode(dv, sv reflect.Value) { + valueDecoder(dv, sv)(dv, sv) +} + +type decoderCacheKey struct { + dt, st reflect.Type +} + +var decoderCache struct { + sync.RWMutex + m map[decoderCacheKey]decoderFunc +} + +func valueDecoder(dv, sv reflect.Value) decoderFunc { + if !sv.IsValid() { + return invalidValueDecoder + } + + if dv.IsValid() { + dv = indirect(dv, false) + dv.Set(reflect.Zero(dv.Type())) + } + + return typeDecoder(dv.Type(), sv.Type()) +} + 
+func typeDecoder(dt, st reflect.Type) decoderFunc { + decoderCache.RLock() + f := decoderCache.m[decoderCacheKey{dt, st}] + decoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + decoderCache.Lock() + var wg sync.WaitGroup + wg.Add(1) + decoderCache.m[decoderCacheKey{dt, st}] = func(dv, sv reflect.Value) { + wg.Wait() + f(dv, sv) + } + decoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = newTypeDecoder(dt, st) + wg.Done() + decoderCache.Lock() + decoderCache.m[decoderCacheKey{dt, st}] = f + decoderCache.Unlock() + return f +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +func indirect(v reflect.Value, decodeNull bool) reflect.Value { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodeNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go new file mode 100644 index 000000000..909b2cc2d --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go @@ -0,0 +1,426 @@ +package encoding + +import ( + "bytes" + "encoding/json" + "image" + "reflect" + "testing" +) + +type T struct { + X string + Y int + Z int `gorethink:"-"` +} + +type U struct { + Alphabet string `gorethink:"alpha"` +} + +type V struct { + F1 interface{} + F2 int32 + F3 string +} + +type tx struct { + x int +} + +var txType = reflect.TypeOf((*tx)(nil)).Elem() + +// Test data structures for anonymous fields. 
+ +type Point struct { + Z int +} + +type Top struct { + Level0 int + Embed0 + *Embed0a + *Embed0b `gorethink:"e,omitempty"` // treated as named + Embed0c `gorethink:"-"` // ignored + Loop + Embed0p // has Point with X, Y, used + Embed0q // has Point with Z, used +} + +type Embed0 struct { + Level1a int // overridden by Embed0a's Level1a with tag + Level1b int // used because Embed0a's Level1b is renamed + Level1c int // used because Embed0a's Level1c is ignored + Level1d int // annihilated by Embed0a's Level1d + Level1e int `gorethink:"x"` // annihilated by Embed0a.Level1e +} + +type Embed0a struct { + Level1a int `gorethink:"Level1a,omitempty"` + Level1b int `gorethink:"LEVEL1B,omitempty"` + Level1c int `gorethink:"-"` + Level1d int // annihilated by Embed0's Level1d + Level1f int `gorethink:"x"` // annihilated by Embed0's Level1e +} + +type Embed0b Embed0 + +type Embed0c Embed0 + +type Embed0p struct { + image.Point +} + +type Embed0q struct { + Point +} + +type Loop struct { + Loop1 int `gorethink:",omitempty"` + Loop2 int `gorethink:",omitempty"` + *Loop +} + +// From reflect test: +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// From reflect test: +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +type Pointer struct { + PPoint *Point + Point Point +} + +type decodeTest struct { + in interface{} + ptr interface{} + out interface{} + err error +} + +type Ambig struct { + // Given "hello", the first match should win. 
+ First int `gorethink:"HELLO"` + Second int `gorethink:"Hello"` +} + +var decodeTests = []decodeTest{ + // basic types + {in: true, ptr: new(bool), out: true}, + {in: 1, ptr: new(int), out: 1}, + {in: 1.2, ptr: new(float64), out: 1.2}, + {in: -5, ptr: new(int16), out: int16(-5)}, + {in: 2, ptr: new(string), out: string("2")}, + {in: float64(2.0), ptr: new(interface{}), out: float64(2.0)}, + {in: string("2"), ptr: new(interface{}), out: string("2")}, + {in: "a\u1234", ptr: new(string), out: "a\u1234"}, + {in: []interface{}{}, ptr: new([]string), out: []string{}}, + {in: map[string]interface{}{"X": []interface{}{1, 2, 3}, "Y": 4}, ptr: new(T), out: T{}, err: &DecodeTypeError{reflect.TypeOf(""), reflect.TypeOf([]interface{}{}), ""}}, + {in: map[string]interface{}{"x": 1}, ptr: new(tx), out: tx{}}, + {in: map[string]interface{}{"F1": float64(1), "F2": 2, "F3": 3}, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: string("3")}}, + {in: map[string]interface{}{"F1": string("1"), "F2": 2, "F3": 3}, ptr: new(V), out: V{F1: string("1"), F2: int32(2), F3: string("3")}}, + { + in: map[string]interface{}{"k1": int64(1), "k2": "s", "k3": []interface{}{int64(1), 2.0, 3e-3}, "k4": map[string]interface{}{"kk1": "s", "kk2": int64(2)}}, + out: map[string]interface{}{"k1": int64(1), "k2": "s", "k3": []interface{}{int64(1), 2.0, 3e-3}, "k4": map[string]interface{}{"kk1": "s", "kk2": int64(2)}}, + ptr: new(interface{}), + }, + + // Z has a "-" tag. 
+ {in: map[string]interface{}{"Y": 1, "Z": 2}, ptr: new(T), out: T{Y: 1}}, + + {in: map[string]interface{}{"alpha": "abc", "alphabet": "xyz"}, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: map[string]interface{}{"alpha": "abc"}, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: map[string]interface{}{"alphabet": "xyz"}, ptr: new(U), out: U{}}, + + // array tests + {in: []interface{}{1, 2, 3}, ptr: new([3]int), out: [3]int{1, 2, 3}}, + {in: []interface{}{1, 2, 3}, ptr: new([1]int), out: [1]int{1}}, + {in: []interface{}{1, 2, 3}, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + + // empty array to interface test + {in: map[string]interface{}{"T": []interface{}{}}, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, + + { + in: map[string]interface{}{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "level1d": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": map[string]interface{}{ + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12, + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + }, + ptr: new(Top), + out: Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + }, + }, + { + in: map[string]interface{}{"hello": 1}, + ptr: new(Ambig), + out: Ambig{First: 1}, + }, + { + in: map[string]interface{}{"X": 1, "Y": 2}, + ptr: new(S5), + out: S5{S8: S8{S9: S9{Y: 2}}}, + }, + { + in: map[string]interface{}{"X": 1, "Y": 2}, + ptr: new(S10), + out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, + }, + { + in: map[string]interface{}{"PPoint": map[string]interface{}{"Z": 1}, "Point": map[string]interface{}{"Z": 2}}, + ptr: new(Pointer), + out: Pointer{PPoint: &Point{Z: 1}, Point: Point{Z: 2}}, + }, + { + in: 
map[string]interface{}{"Point": map[string]interface{}{"Z": 2}}, + ptr: new(Pointer), + out: Pointer{PPoint: nil, Point: Point{Z: 2}}, + }, +} + +func TestDecode(t *testing.T) { + for i, tt := range decodeTests { + if tt.ptr == nil { + continue + } + + // v = new(right-type) + v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + + err := Decode(v.Interface(), tt.in) + if !jsonEqual(err, tt.err) { + t.Errorf("#%d: got error %v want %v", i, err, tt.err) + continue + } + + if tt.err == nil && !jsonEqual(v.Elem().Interface(), tt.out) { + t.Errorf("#%d: mismatch\nhave: %+v\nwant: %+v", i, v.Elem().Interface(), tt.out) + continue + } + + // Check round trip. + if tt.err == nil { + enc, err := Encode(v.Interface()) + if err != nil { + t.Errorf("#%d: error re-marshaling: %v", i, err) + continue + } + vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + + if err := Decode(vv.Interface(), enc); err != nil { + t.Errorf("#%d: error re-decodeing: %v", i, err) + continue + } + if !jsonEqual(v.Elem().Interface(), vv.Elem().Interface()) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) + continue + } + } + } +} + +func TestStringKind(t *testing.T) { + type aMap map[string]int + + var m1, m2 map[string]int + m1 = map[string]int{ + "foo": 42, + } + + data, err := Encode(m1) + if err != nil { + t.Errorf("Unexpected error encoding: %v", err) + } + + err = Decode(&m2, data) + if err != nil { + t.Errorf("Unexpected error decoding: %v", err) + } + + if !jsonEqual(m1, m2) { + t.Error("Items should be equal after encoding and then decoding") + } + +} + +// Test handling of unexported fields that should be ignored. 
+type unexportedFields struct { + Name string + m map[string]interface{} `gorethink:"-"` + m2 map[string]interface{} `gorethink:"abcd"` +} + +func TestDecodeUnexported(t *testing.T) { + input := map[string]interface{}{ + "Name": "Bob", + "m": map[string]interface{}{ + "x": 123, + }, + "m2": map[string]interface{}{ + "y": 123, + }, + "abcd": map[string]interface{}{ + "z": 789, + }, + } + want := &unexportedFields{Name: "Bob"} + + out := &unexportedFields{} + err := Decode(out, input) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !jsonEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +type Foo struct { + FooBar interface{} `gorethink:"foobar"` +} +type Bar struct { + Baz int `gorethink:"baz"` +} + +type UnmarshalerPointer struct { + Value *UnmarshalerValue +} + +type UnmarshalerValue struct { + ValueInt int64 + ValueString string +} + +func (v *UnmarshalerValue) MarshalRQL() (interface{}, error) { + if v.ValueInt != int64(0) { + return Encode(v.ValueInt) + } + if v.ValueString != "" { + return Encode(v.ValueString) + } + + return Encode(nil) +} + +func (v *UnmarshalerValue) UnmarshalRQL(b interface{}) (err error) { + n, s := int64(0), "" + + if err = Decode(&s, b); err == nil { + v.ValueString = s + return + } + if err = Decode(&n, b); err == nil { + v.ValueInt = n + + } + + return +} + +func TestDecodeUnmarshalerPointer(t *testing.T) { + input := map[string]interface{}{ + "Value": "abc", + } + want := &UnmarshalerPointer{ + Value: &UnmarshalerValue{ValueString: "abc"}, + } + + out := &UnmarshalerPointer{} + err := Decode(out, input) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !jsonEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +func jsonEqual(a, b interface{}) bool { + ba, err := json.Marshal(a) + if err != nil { + panic(err) + } + bb, err := json.Marshal(b) + if err != nil { + panic(err) + } + + return bytes.Compare(ba, bb) == 0 +} diff --git 
a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go new file mode 100644 index 000000000..61d268f83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go @@ -0,0 +1,524 @@ +package encoding + +import ( + "bytes" + "fmt" + "reflect" + "strconv" +) + +// newTypeDecoder constructs an decoderFunc for a type. +func newTypeDecoder(dt, st reflect.Type) decoderFunc { + if reflect.PtrTo(dt).Implements(unmarshalerType) || + dt.Implements(unmarshalerType) { + return unmarshalerDecoder + } + + if st.Kind() == reflect.Interface { + return interfaceAsTypeDecoder + } + + switch dt.Kind() { + case reflect.Bool: + switch st.Kind() { + case reflect.Bool: + return boolAsBoolDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsBoolDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsBoolDecoder + case reflect.Float32, reflect.Float64: + return floatAsBoolDecoder + case reflect.String: + return stringAsBoolDecoder + default: + return decodeTypeError + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch st.Kind() { + case reflect.Bool: + return boolAsIntDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsIntDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsIntDecoder + case reflect.Float32, reflect.Float64: + return floatAsIntDecoder + case reflect.String: + return stringAsIntDecoder + default: + return decodeTypeError + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch st.Kind() { + case reflect.Bool: + return boolAsUintDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + return intAsUintDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsUintDecoder + case reflect.Float32, reflect.Float64: + return floatAsUintDecoder + case reflect.String: + return stringAsUintDecoder + default: + return decodeTypeError + } + case reflect.Float32, reflect.Float64: + switch st.Kind() { + case reflect.Bool: + return boolAsFloatDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsFloatDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsFloatDecoder + case reflect.Float32, reflect.Float64: + return floatAsFloatDecoder + case reflect.String: + return stringAsFloatDecoder + default: + return decodeTypeError + } + case reflect.String: + switch st.Kind() { + case reflect.Bool: + return boolAsStringDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsStringDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsStringDecoder + case reflect.Float32, reflect.Float64: + return floatAsStringDecoder + case reflect.String: + return stringAsStringDecoder + default: + return decodeTypeError + } + case reflect.Interface: + if !st.AssignableTo(dt) { + return decodeTypeError + } + + return interfaceDecoder + case reflect.Ptr: + return newPtrDecoder(dt, st) + case reflect.Map: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Map: + return newMapAsMapDecoder(dt, st) + default: + return decodeTypeError + } + case reflect.Struct: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Map: + if kind := st.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return newDecodeTypeError(fmt.Errorf("map needs string keys")) + } + + return 
newMapAsStructDecoder(dt, st) + default: + return decodeTypeError + } + case reflect.Slice: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Array, reflect.Slice: + return newSliceDecoder(dt, st) + default: + return decodeTypeError + } + case reflect.Array: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Array, reflect.Slice: + return newArrayDecoder(dt, st) + default: + return decodeTypeError + } + default: + return unsupportedTypeDecoder + } +} + +func invalidValueDecoder(dv, sv reflect.Value) { + dv.Set(reflect.Zero(dv.Type())) +} + +func unsupportedTypeDecoder(dv, sv reflect.Value) { + panic(&UnsupportedTypeError{dv.Type()}) +} + +func decodeTypeError(dv, sv reflect.Value) { + panic(&DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + }) +} + +func newDecodeTypeError(err error) decoderFunc { + return func(dv, sv reflect.Value) { + panic(&DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: err.Error(), + }) + } +} + +func interfaceDecoder(dv, sv reflect.Value) { + dv.Set(sv) +} + +func interfaceAsTypeDecoder(dv, sv reflect.Value) { + decode(dv, sv.Elem()) +} + +type ptrDecoder struct { + elemDec decoderFunc +} + +func (d *ptrDecoder) decode(dv, sv reflect.Value) { + v := reflect.New(dv.Type().Elem()) + d.elemDec(v, sv) + dv.Set(v) +} + +func newPtrDecoder(dt, st reflect.Type) decoderFunc { + dec := &ptrDecoder{typeDecoder(dt.Elem(), st)} + + return dec.decode +} + +func unmarshalerDecoder(dv, sv reflect.Value) { + // modeled off of https://golang.org/src/encoding/json/decode.go?#L325 + if dv.Kind() != reflect.Ptr && dv.Type().Name() != "" && dv.CanAddr() { + dv = dv.Addr() + } + + if dv.IsNil() { + dv.Set(reflect.New(dv.Type().Elem())) + } + + u := dv.Interface().(Unmarshaler) + err := u.UnmarshalRQL(sv.Interface()) + if err != nil { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} + +// Boolean decoders + +func 
boolAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Bool()) +} +func boolAsIntDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetInt(1) + } else { + dv.SetInt(0) + } +} +func boolAsUintDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetUint(1) + } else { + dv.SetUint(0) + } +} +func boolAsFloatDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetFloat(1) + } else { + dv.SetFloat(0) + } +} +func boolAsStringDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetString("1") + } else { + dv.SetString("0") + } +} + +// Int decoders + +func intAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Int() != 0) +} +func intAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(sv.Int()) +} +func intAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(uint64(sv.Int())) +} +func intAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Int())) +} +func intAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatInt(sv.Int(), 10)) +} + +// Uint decoders + +func uintAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Uint() != 0) +} +func uintAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(int64(sv.Uint())) +} +func uintAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(sv.Uint()) +} +func uintAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Uint())) +} +func uintAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatUint(sv.Uint(), 10)) +} + +// Float decoders + +func floatAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Float() != 0) +} +func floatAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(int64(sv.Float())) +} +func floatAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(uint64(sv.Float())) +} +func floatAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Float())) +} +func floatAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatFloat(sv.Float(), 'f', -1, 64)) +} + +// String decoders + +func stringAsBoolDecoder(dv, sv reflect.Value) { + b, err := 
strconv.ParseBool(sv.String()) + if err == nil { + dv.SetBool(b) + } else if sv.String() == "" { + dv.SetBool(false) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsIntDecoder(dv, sv reflect.Value) { + i, err := strconv.ParseInt(sv.String(), 0, dv.Type().Bits()) + if err == nil { + dv.SetInt(i) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsUintDecoder(dv, sv reflect.Value) { + i, err := strconv.ParseUint(sv.String(), 0, dv.Type().Bits()) + if err == nil { + dv.SetUint(i) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsFloatDecoder(dv, sv reflect.Value) { + f, err := strconv.ParseFloat(sv.String(), dv.Type().Bits()) + if err == nil { + dv.SetFloat(f) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(sv.String()) +} + +// Slice/Array decoder + +type sliceDecoder struct { + arrayDec decoderFunc +} + +func (d *sliceDecoder) decode(dv, sv reflect.Value) { + if dv.Kind() == reflect.Slice { + dv.Set(reflect.MakeSlice(dv.Type(), dv.Len(), dv.Cap())) + } + + if !sv.IsNil() { + d.arrayDec(dv, sv) + } +} + +func newSliceDecoder(dt, st reflect.Type) decoderFunc { + // Byte slices get special treatment; arrays don't. + // if t.Elem().Kind() == reflect.Uint8 { + // return decodeByteSlice + // } + dec := &sliceDecoder{newArrayDecoder(dt, st)} + return dec.decode +} + +type arrayDecoder struct { + elemDec decoderFunc +} + +func (d *arrayDecoder) decode(dv, sv reflect.Value) { + // Iterate through the slice/array and decode each element before adding it + // to the dest slice/array + i := 0 + for i < sv.Len() { + if dv.Kind() == reflect.Slice { + // Get element of array, growing if necessary. 
+ if i >= dv.Cap() { + newcap := dv.Cap() + dv.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newdv := reflect.MakeSlice(dv.Type(), dv.Len(), newcap) + reflect.Copy(newdv, dv) + dv.Set(newdv) + } + if i >= dv.Len() { + dv.SetLen(i + 1) + } + } + + if i < dv.Len() { + // Decode into element. + d.elemDec(dv.Index(i), sv.Index(i)) + } + + i++ + } + + // Ensure that the destination is the correct size + if i < dv.Len() { + if dv.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(dv.Type().Elem()) + for ; i < dv.Len(); i++ { + dv.Index(i).Set(z) + } + } else { + dv.SetLen(i) + } + } +} + +func newArrayDecoder(dt, st reflect.Type) decoderFunc { + dec := &arrayDecoder{typeDecoder(dt.Elem(), st.Elem())} + return dec.decode +} + +// Map decoder + +type mapAsMapDecoder struct { + keyDec, elemDec decoderFunc +} + +func (d *mapAsMapDecoder) decode(dv, sv reflect.Value) { + dt := dv.Type() + dv.Set(reflect.MakeMap(reflect.MapOf(dt.Key(), dt.Elem()))) + + var mapKey reflect.Value + var mapElem reflect.Value + + keyType := dv.Type().Key() + elemType := dv.Type().Elem() + + for _, sElemKey := range sv.MapKeys() { + var dElemKey reflect.Value + var dElemVal reflect.Value + + if !mapKey.IsValid() { + mapKey = reflect.New(keyType).Elem() + } else { + mapKey.Set(reflect.Zero(keyType)) + } + dElemKey = mapKey + + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + dElemVal = mapElem + + d.keyDec(dElemKey, sElemKey) + d.elemDec(dElemVal, sv.MapIndex(sElemKey)) + + dv.SetMapIndex(dElemKey, dElemVal) + } +} + +func newMapAsMapDecoder(dt, st reflect.Type) decoderFunc { + d := &mapAsMapDecoder{typeDecoder(dt.Key(), st.Key()), typeDecoder(dt.Elem(), st.Elem())} + return d.decode +} + +type mapAsStructDecoder struct { + fields []field + fieldDecs []decoderFunc +} + +func (d *mapAsStructDecoder) decode(dv, sv reflect.Value) { + for _, kv := range sv.MapKeys() { + var f *field + var fieldDec decoderFunc + 
key := []byte(kv.String()) + for i := range d.fields { + ff := &d.fields[i] + ffd := d.fieldDecs[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + fieldDec = ffd + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + fieldDec = ffd + } + } + + if f == nil { + continue + } + + dElemVal := fieldByIndex(dv, f.index) + sElemVal := sv.MapIndex(kv) + + if !sElemVal.IsValid() || !dElemVal.CanSet() { + continue + } + + fieldDec(dElemVal, sElemVal) + } +} + +func newMapAsStructDecoder(dt, st reflect.Type) decoderFunc { + fields := cachedTypeFields(dt) + se := &mapAsStructDecoder{ + fields: fields, + fieldDecs: make([]decoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldDecs[i] = typeDecoder(typeByIndex(dt, f.index), st.Elem()) + } + return se.decode +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go new file mode 100644 index 000000000..3b0d3508d --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go @@ -0,0 +1,89 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "errors" + "reflect" + "runtime" + "sync" +) + +type encoderFunc func(v reflect.Value) interface{} + +// Encode returns the encoded value of v. +// +// Encode traverses the value v recursively and looks for structs. 
If a struct +// is found then it is checked for tagged fields and convert to +// map[string]interface{} +func Encode(v interface{}) (ev interface{}, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if v, ok := r.(string); ok { + err = errors.New(v) + } else { + err = r.(error) + } + } + }() + + return encode(reflect.ValueOf(v)), nil +} + +func encode(v reflect.Value) interface{} { + return valueEncoder(v)(v) +} + +var encoderCache struct { + sync.RWMutex + m map[reflect.Type]encoderFunc +} + +func valueEncoder(v reflect.Value) encoderFunc { + if !v.IsValid() { + return invalidValueEncoder + } + return typeEncoder(v.Type()) +} + +func typeEncoder(t reflect.Type) encoderFunc { + encoderCache.RLock() + f := encoderCache.m[t] + encoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to + // be ready and then calls it. This indirect + // func is only used for recursive types. + encoderCache.Lock() + var wg sync.WaitGroup + wg.Add(1) + encoderCache.m[t] = func(v reflect.Value) interface{} { + wg.Wait() + return f(v) + } + encoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = newTypeEncoder(t, true) + wg.Done() + encoderCache.Lock() + encoderCache.m[t] = f + encoderCache.Unlock() + return f +} + +// IgnoreType causes the encoder to ignore a type when encoding +func IgnoreType(t reflect.Type) { + encoderCache.Lock() + encoderCache.m[t] = doNothingEncoder + encoderCache.Unlock() +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go new file mode 100644 index 000000000..7b1ee0614 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go @@ -0,0 +1,262 @@ +package encoding + +import ( + "image" + "reflect" + "testing" + "time" +) + +var encodeExpected = map[string]interface{}{ + "Level0": int64(1), + "Level1b": int64(2), + "Level1c": int64(3), + "Level1a": int64(5), + "LEVEL1B": int64(6), + "e": map[string]interface{}{ + "Level1a": int64(8), + "Level1b": int64(9), + "Level1c": int64(10), + "Level1d": int64(11), + "x": int64(12), + }, + "Loop1": int64(13), + "Loop2": int64(14), + "X": int64(15), + "Y": int64(16), + "Z": int64(17), +} + +func TestEncode(t *testing.T) { + // Top is defined in decoder_test.go + var in = Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + } + + got, err := Encode(&in) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, encodeExpected) { + t.Errorf(" got: %v\nwant: %v\n", got, encodeExpected) + } +} + +type Optionals struct { + Sr string `gorethink:"sr"` + So string `gorethink:"so,omitempty"` + Sw string `gorethink:"-"` + + Ir int `gorethink:"omitempty"` // actually named omitempty, not an option + Io int 
`gorethink:"io,omitempty"` + + Tr time.Time `gorethink:"tr"` + To time.Time `gorethink:"to,omitempty"` + + Slr []string `gorethink:"slr"` + Slo []string `gorethink:"slo,omitempty"` + + Mr map[string]interface{} `gorethink:"mr"` + Mo map[string]interface{} `gorethink:",omitempty"` +} + +var optionalsExpected = map[string]interface{}{ + "sr": "", + "omitempty": int64(0), + "tr": map[string]interface{}{"$reql_type$": "TIME", "epoch_time": 0, "timezone": "+00:00"}, + "slr": []interface{}{}, + "mr": map[string]interface{}{}, +} + +func TestOmitEmpty(t *testing.T) { + var o Optionals + o.Sw = "something" + o.Tr = time.Unix(0, 0) + o.Mr = map[string]interface{}{} + o.Mo = map[string]interface{}{} + + got, err := Encode(&o) + if err != nil { + t.Fatal(err) + } + if !jsonEqual(got, optionalsExpected) { + t.Errorf("\ngot: %#v\nwant: %#v\n", got, optionalsExpected) + } +} + +type IntType int + +type MyStruct struct { + IntType +} + +func TestAnonymousNonstruct(t *testing.T) { + var i IntType = 11 + a := MyStruct{i} + var want = map[string]interface{}{"IntType": int64(11)} + + got, err := Encode(a) + if err != nil { + t.Fatalf("Encode: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestEncodePointer(t *testing.T) { + v := Pointer{PPoint: &Point{Z: 1}, Point: Point{Z: 2}} + var want = map[string]interface{}{ + "PPoint": map[string]interface{}{"Z": int64(1)}, + "Point": map[string]interface{}{"Z": int64(2)}, + } + + got, err := Encode(v) + if err != nil { + t.Fatalf("Encode: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestEncodeNilPointer(t *testing.T) { + v := Pointer{PPoint: nil, Point: Point{Z: 2}} + var want = map[string]interface{}{ + "PPoint": nil, + "Point": map[string]interface{}{"Z": int64(2)}, + } + + got, err := Encode(v) + if err != nil { + t.Fatalf("Encode: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, 
want) + } +} + +type BugA struct { + S string +} + +type BugB struct { + BugA + S string +} + +type BugC struct { + S string +} + +// Legal Go: We never use the repeated embedded field (S). +type BugX struct { + A int + BugA + BugB +} + +// Issue 5245. +func TestEmbeddedBug(t *testing.T) { + v := BugB{ + BugA{"A"}, + "B", + } + got, err := Encode(v) + if err != nil { + t.Fatal("Encode:", err) + } + want := map[string]interface{}{"S": "B"} + if !reflect.DeepEqual(got, want) { + t.Fatalf("Encode: got %v want %v", got, want) + } + // Now check that the duplicate field, S, does not appear. + x := BugX{ + A: 23, + } + got, err = Encode(x) + if err != nil { + t.Fatal("Encode:", err) + } + want = map[string]interface{}{"A": int64(23)} + if !reflect.DeepEqual(got, want) { + t.Fatalf("Encode: got %v want %v", got, want) + } +} + +type BugD struct { // Same as BugA after tagging. + XXX string `gorethink:"S"` +} + +// BugD's tagged S field should dominate BugA's. +type BugY struct { + BugA + BugD +} + +// Test that a field with a tag dominates untagged fields. +func TestTaggedFieldDominates(t *testing.T) { + v := BugY{ + BugA{"BugA"}, + BugD{"BugD"}, + } + got, err := Encode(v) + if err != nil { + t.Fatal("Encode:", err) + } + want := map[string]interface{}{"S": "BugD"} + if !reflect.DeepEqual(got, want) { + t.Fatalf("Encode: got %v want %v", got, want) + } +} + +// There are no tags here, so S should not appear. +type BugZ struct { + BugA + BugC + BugY // Contains a tagged S field through BugD; should not dominate. 
+} + +func TestDuplicatedFieldDisappears(t *testing.T) { + v := BugZ{ + BugA{"BugA"}, + BugC{"BugC"}, + BugY{ + BugA{"nested BugA"}, + BugD{"nested BugD"}, + }, + } + got, err := Encode(v) + if err != nil { + t.Fatal("Encode:", err) + } + want := map[string]interface{}{} + if !reflect.DeepEqual(got, want) { + t.Fatalf("Encode: got %v want %v", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go new file mode 100644 index 000000000..de38a1905 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go @@ -0,0 +1,303 @@ +package encoding + +import ( + "encoding/base64" + "math" + "reflect" + "time" +) + +// newTypeEncoder constructs an encoderFunc for a type. +// The returned encoder only checks CanAddr when allowAddr is true. +func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { + if t.Implements(marshalerType) { + return marshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(marshalerType) { + return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + // Check for psuedo-types first + switch t { + case timeType: + return timePseudoTypeEncoder + } + + switch t.Kind() { + case reflect.Bool: + return boolEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintEncoder + case reflect.Float32, reflect.Float64: + return floatEncoder + case reflect.String: + return stringEncoder + case reflect.Interface: + return interfaceEncoder + case reflect.Struct: + return newStructEncoder(t) + case reflect.Map: + return newMapEncoder(t) + case reflect.Slice: + return newSliceEncoder(t) + case reflect.Array: + return newArrayEncoder(t) + case reflect.Ptr: + return 
newPtrEncoder(t) + default: + return unsupportedTypeEncoder + } +} + +func invalidValueEncoder(v reflect.Value) interface{} { + return nil +} + +func doNothingEncoder(v reflect.Value) interface{} { + return v.Interface() +} + +func marshalerEncoder(v reflect.Value) interface{} { + if v.Kind() == reflect.Ptr && v.IsNil() { + return nil + } + m := v.Interface().(Marshaler) + ev, err := m.MarshalRQL() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + + return ev +} + +func addrMarshalerEncoder(v reflect.Value) interface{} { + va := v.Addr() + if va.IsNil() { + return nil + } + m := va.Interface().(Marshaler) + ev, err := m.MarshalRQL() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + + return ev +} + +func boolEncoder(v reflect.Value) interface{} { + if v.Bool() { + return true + } else { + return false + } +} + +func intEncoder(v reflect.Value) interface{} { + return v.Int() +} + +func uintEncoder(v reflect.Value) interface{} { + return v.Uint() +} + +func floatEncoder(v reflect.Value) interface{} { + return v.Float() +} + +func stringEncoder(v reflect.Value) interface{} { + return v.String() +} + +func interfaceEncoder(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + return encode(v.Elem()) +} + +func unsupportedTypeEncoder(v reflect.Value) interface{} { + panic(&UnsupportedTypeError{v.Type()}) +} + +type structEncoder struct { + fields []field + fieldEncs []encoderFunc +} + +func (se *structEncoder) encode(v reflect.Value) interface{} { + m := make(map[string]interface{}) + + for i, f := range se.fields { + fv := fieldByIndex(v, f.index) + if !fv.IsValid() || f.omitEmpty && se.isEmptyValue(fv) { + continue + } + + m[f.name] = se.fieldEncs[i](fv) + } + + return m +} + +func (se *structEncoder) isEmptyValue(v reflect.Value) bool { + if v.Type() == timeType { + return v.Interface().(time.Time) == time.Time{} + } + + return isEmptyValue(v) +} + +func newStructEncoder(t reflect.Type) encoderFunc { + fields := 
cachedTypeFields(t) + se := &structEncoder{ + fields: fields, + fieldEncs: make([]encoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) + } + return se.encode +} + +type mapEncoder struct { + elemEnc encoderFunc +} + +func (me *mapEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + + m := make(map[string]interface{}) + + for _, k := range v.MapKeys() { + m[k.String()] = me.elemEnc(v.MapIndex(k)) + } + + return m +} + +func newMapEncoder(t reflect.Type) encoderFunc { + if t.Key().Kind() != reflect.String { + return unsupportedTypeEncoder + } + me := &mapEncoder{typeEncoder(t.Elem())} + return me.encode +} + +// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. +type sliceEncoder struct { + arrayEnc encoderFunc +} + +func (se *sliceEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return []interface{}{} + } + return se.arrayEnc(v) +} + +func newSliceEncoder(t reflect.Type) encoderFunc { + // Byte slices get special treatment; arrays don't. 
+ if t.Elem().Kind() == reflect.Uint8 { + return encodeByteSlice + } + enc := &sliceEncoder{newArrayEncoder(t)} + return enc.encode +} + +type arrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *arrayEncoder) encode(v reflect.Value) interface{} { + n := v.Len() + + a := make([]interface{}, n) + for i := 0; i < n; i++ { + a[i] = ae.elemEnc(v.Index(i)) + } + + return a +} + +func newArrayEncoder(t reflect.Type) encoderFunc { + enc := &arrayEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type ptrEncoder struct { + elemEnc encoderFunc +} + +func (pe *ptrEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + return pe.elemEnc(v.Elem()) +} + +func newPtrEncoder(t reflect.Type) encoderFunc { + enc := &ptrEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type condAddrEncoder struct { + canAddrEnc, elseEnc encoderFunc +} + +func (ce *condAddrEncoder) encode(v reflect.Value) interface{} { + if v.CanAddr() { + return ce.canAddrEnc(v) + } else { + return ce.elseEnc(v) + } +} + +// newCondAddrEncoder returns an encoder that checks whether its value +// CanAddr and delegates to canAddrEnc if so, else to elseEnc. 
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { + enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return enc.encode +} + +// Pseudo-type encoders + +// Encode a time.Time value to the TIME RQL type +func timePseudoTypeEncoder(v reflect.Value) interface{} { + t := v.Interface().(time.Time) + + timeVal := float64(t.UnixNano()) / float64(time.Second) + + // use seconds-since-epoch precision if time.Time `t` + // is before the oldest nanosecond time + if t.Before(time.Unix(0, math.MinInt64)) { + timeVal = float64(t.Unix()) + } + + return map[string]interface{}{ + "$reql_type$": "TIME", + "epoch_time": timeVal, + "timezone": "+00:00", + } +} + +// Encode a byte slice to the BINARY RQL type +func encodeByteSlice(v reflect.Value) interface{} { + var b []byte + if !v.IsNil() { + b = v.Bytes() + } + + dst := make([]byte, base64.StdEncoding.EncodedLen(len(b))) + base64.StdEncoding.Encode(dst, b) + + return map[string]interface{}{ + "$reql_type$": "BINARY", + "data": string(dst), + } +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go new file mode 100644 index 000000000..0169e1448 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go @@ -0,0 +1,32 @@ +package encoding + +import ( + "reflect" + "time" +) + +var ( + // type constants + stringType = reflect.TypeOf("") + timeType = reflect.TypeOf(new(time.Time)).Elem() + + marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +) + +// Marshaler is the interface implemented by objects that +// can marshal themselves into a valid RQL psuedo-type. +type Marshaler interface { + MarshalRQL() (interface{}, error) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a psuedo-type object of themselves. 
+type Unmarshaler interface { + UnmarshalRQL(interface{}) error +} + +func init() { + encoderCache.m = make(map[reflect.Type]encoderFunc) + decoderCache.m = make(map[decoderCacheKey]decoderFunc) +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go new file mode 100644 index 000000000..8b9ac2c52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go @@ -0,0 +1,102 @@ +package encoding + +import ( + "fmt" + "reflect" + "strings" +) + +type MarshalerError struct { + Type reflect.Type + Err error +} + +func (e *MarshalerError) Error() string { + return "gorethink: error calling MarshalRQL for type " + e.Type.String() + ": " + e.Err.Error() +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "gorethink: UnmarshalRQL(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "gorethink: UnmarshalRQL(non-pointer " + e.Type.String() + ")" + } + return "gorethink: UnmarshalRQL(nil " + e.Type.String() + ")" +} + +// An InvalidTypeError describes a value that was +// not appropriate for a value of a specific Go type. +type DecodeTypeError struct { + DestType, SrcType reflect.Type + Reason string +} + +func (e *DecodeTypeError) Error() string { + if e.Reason != "" { + return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() + ": " + e.Reason + } else { + return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() + + } +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. 
+type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "gorethink: unsupported type: " + e.Type.String() +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unexpected value type. +type UnexpectedTypeError struct { + DestType, SrcType reflect.Type +} + +func (e *UnexpectedTypeError) Error() string { + return "gorethink: expected type: " + e.DestType.String() + ", got " + e.SrcType.String() +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return "gorethink: unsupported value: " + e.Str +} + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go new file mode 100644 index 000000000..21c9e68e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go @@ -0,0 +1,139 @@ +package encoding + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
+ kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'Å¿' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. 
t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go new file mode 100644 index 000000000..cea3edaf1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go @@ -0,0 +1,69 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "reflect" + "strings" + "unicode" +) + +const TagName = "gorethink" + +// tagOptions is the string following a comma in a struct field's +// tag, or the empty string. It does not include the leading comma. 
+type tagOptions string + +func getTag(sf reflect.StructField) string { + return sf.Tag.Get(TagName) +} + +// parseTag splits a struct field's tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// Contains returns whether checks that a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go new file mode 100644 index 000000000..0ca2c7734 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go @@ -0,0 +1,72 @@ +package encoding + +import "reflect" + +func getTypeKind(t reflect.Type) reflect.Kind { + kind := t.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() 
{ + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(i) + } + + return v +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, i := range index { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + t = t.Field(i).Type + } + return t +} + +// valueByString sorts reflect.Value by the string value, this is useful for +// sorting the result of MapKeys +type valueByString []reflect.Value + +func (x valueByString) Len() int { return len(x) } + +func (x valueByString) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x valueByString) Less(i, j int) bool { + return x[i].String() < x[j].String() +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go new file mode 100644 index 000000000..54ac15198 --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go @@ -0,0 +1,1243 @@ +// Code generated by protoc-gen-go. +// source: ql2.proto +// DO NOT EDIT! + +package ql2 + +import proto "github.com/golang/protobuf/proto" +import json "encoding/json" +import math "math" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +type VersionDummy_Version int32 + +const ( + VersionDummy_V0_1 VersionDummy_Version = 1063369270 + VersionDummy_V0_2 VersionDummy_Version = 1915781601 + VersionDummy_V0_3 VersionDummy_Version = 1601562686 + VersionDummy_V0_4 VersionDummy_Version = 1074539808 +) + +var VersionDummy_Version_name = map[int32]string{ + 1063369270: "V0_1", + 1915781601: "V0_2", + 1601562686: "V0_3", + 1074539808: "V0_4", +} +var VersionDummy_Version_value = map[string]int32{ + "V0_1": 1063369270, + "V0_2": 1915781601, + "V0_3": 1601562686, + "V0_4": 1074539808, +} + +func (x VersionDummy_Version) Enum() *VersionDummy_Version { + p := new(VersionDummy_Version) + *p = x + return p +} +func (x VersionDummy_Version) String() string { + return proto.EnumName(VersionDummy_Version_name, int32(x)) +} +func (x VersionDummy_Version) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *VersionDummy_Version) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(VersionDummy_Version_value, data, "VersionDummy_Version") + if err != nil { + return err + } + *x = VersionDummy_Version(value) + return nil +} + +type VersionDummy_Protocol int32 + +const ( + VersionDummy_PROTOBUF VersionDummy_Protocol = 656407617 + VersionDummy_JSON VersionDummy_Protocol = 2120839367 +) + +var VersionDummy_Protocol_name = map[int32]string{ + 656407617: "PROTOBUF", + 2120839367: "JSON", +} +var VersionDummy_Protocol_value = map[string]int32{ + "PROTOBUF": 656407617, + "JSON": 2120839367, +} + +func (x VersionDummy_Protocol) Enum() *VersionDummy_Protocol { + p := new(VersionDummy_Protocol) + *p = x + return p +} +func (x VersionDummy_Protocol) String() string { + return proto.EnumName(VersionDummy_Protocol_name, int32(x)) +} +func (x VersionDummy_Protocol) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *VersionDummy_Protocol) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(VersionDummy_Protocol_value, data, "VersionDummy_Protocol") + if err != nil { + return err + } + *x = VersionDummy_Protocol(value) + return nil +} + +type Query_QueryType int32 + +const ( + Query_START Query_QueryType = 1 + Query_CONTINUE Query_QueryType = 2 + Query_STOP Query_QueryType = 3 + Query_NOREPLY_WAIT Query_QueryType = 4 +) + +var Query_QueryType_name = map[int32]string{ + 1: "START", + 2: "CONTINUE", + 3: "STOP", + 4: "NOREPLY_WAIT", +} +var Query_QueryType_value = map[string]int32{ + "START": 1, + "CONTINUE": 2, + "STOP": 3, + "NOREPLY_WAIT": 4, +} + +func (x Query_QueryType) Enum() *Query_QueryType { + p := new(Query_QueryType) + *p = x + return p +} +func (x Query_QueryType) String() string { + return proto.EnumName(Query_QueryType_name, int32(x)) +} +func (x Query_QueryType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Query_QueryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_QueryType_value, data, "Query_QueryType") + if err != nil { + return err + } + *x = Query_QueryType(value) + return nil +} + +type Frame_FrameType int32 + +const ( + Frame_POS Frame_FrameType = 1 + Frame_OPT Frame_FrameType = 2 +) + +var Frame_FrameType_name = map[int32]string{ + 1: "POS", + 2: "OPT", +} +var Frame_FrameType_value = map[string]int32{ + "POS": 1, + "OPT": 2, +} + +func (x Frame_FrameType) Enum() *Frame_FrameType { + p := new(Frame_FrameType) + *p = x + return p +} +func (x Frame_FrameType) String() string { + return proto.EnumName(Frame_FrameType_name, int32(x)) +} +func (x Frame_FrameType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Frame_FrameType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Frame_FrameType_value, data, "Frame_FrameType") + if err != nil { + return err + } + *x = Frame_FrameType(value) + return nil +} + +type Response_ResponseType int32 + +const ( + Response_SUCCESS_ATOM 
Response_ResponseType = 1 + Response_SUCCESS_SEQUENCE Response_ResponseType = 2 + Response_SUCCESS_PARTIAL Response_ResponseType = 3 + Response_WAIT_COMPLETE Response_ResponseType = 4 + Response_CLIENT_ERROR Response_ResponseType = 16 + Response_COMPILE_ERROR Response_ResponseType = 17 + Response_RUNTIME_ERROR Response_ResponseType = 18 +) + +var Response_ResponseType_name = map[int32]string{ + 1: "SUCCESS_ATOM", + 2: "SUCCESS_SEQUENCE", + 3: "SUCCESS_PARTIAL", + 4: "WAIT_COMPLETE", + 16: "CLIENT_ERROR", + 17: "COMPILE_ERROR", + 18: "RUNTIME_ERROR", +} +var Response_ResponseType_value = map[string]int32{ + "SUCCESS_ATOM": 1, + "SUCCESS_SEQUENCE": 2, + "SUCCESS_PARTIAL": 3, + "WAIT_COMPLETE": 4, + "CLIENT_ERROR": 16, + "COMPILE_ERROR": 17, + "RUNTIME_ERROR": 18, +} + +func (x Response_ResponseType) Enum() *Response_ResponseType { + p := new(Response_ResponseType) + *p = x + return p +} +func (x Response_ResponseType) String() string { + return proto.EnumName(Response_ResponseType_name, int32(x)) +} +func (x Response_ResponseType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Response_ResponseType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_ResponseType_value, data, "Response_ResponseType") + if err != nil { + return err + } + *x = Response_ResponseType(value) + return nil +} + +type Response_ResponseNote int32 + +const ( + Response_SEQUENCE_FEED Response_ResponseNote = 1 + Response_ATOM_FEED Response_ResponseNote = 2 + Response_ORDER_BY_LIMIT_FEED Response_ResponseNote = 3 + Response_UNIONED_FEED Response_ResponseNote = 4 + Response_INCLUDES_STATES Response_ResponseNote = 5 +) + +var Response_ResponseNote_name = map[int32]string{ + 1: "SEQUENCE_FEED", + 2: "ATOM_FEED", + 3: "ORDER_BY_LIMIT_FEED", + 4: "UNIONED_FEED", + 5: "INCLUDES_STATES", +} +var Response_ResponseNote_value = map[string]int32{ + "SEQUENCE_FEED": 1, + "ATOM_FEED": 2, + "ORDER_BY_LIMIT_FEED": 3, + "UNIONED_FEED": 4, + 
"INCLUDES_STATES": 5, +} + +func (x Response_ResponseNote) Enum() *Response_ResponseNote { + p := new(Response_ResponseNote) + *p = x + return p +} +func (x Response_ResponseNote) String() string { + return proto.EnumName(Response_ResponseNote_name, int32(x)) +} +func (x Response_ResponseNote) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Response_ResponseNote) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_ResponseNote_value, data, "Response_ResponseNote") + if err != nil { + return err + } + *x = Response_ResponseNote(value) + return nil +} + +type Datum_DatumType int32 + +const ( + Datum_R_NULL Datum_DatumType = 1 + Datum_R_BOOL Datum_DatumType = 2 + Datum_R_NUM Datum_DatumType = 3 + Datum_R_STR Datum_DatumType = 4 + Datum_R_ARRAY Datum_DatumType = 5 + Datum_R_OBJECT Datum_DatumType = 6 + Datum_R_JSON Datum_DatumType = 7 +) + +var Datum_DatumType_name = map[int32]string{ + 1: "R_NULL", + 2: "R_BOOL", + 3: "R_NUM", + 4: "R_STR", + 5: "R_ARRAY", + 6: "R_OBJECT", + 7: "R_JSON", +} +var Datum_DatumType_value = map[string]int32{ + "R_NULL": 1, + "R_BOOL": 2, + "R_NUM": 3, + "R_STR": 4, + "R_ARRAY": 5, + "R_OBJECT": 6, + "R_JSON": 7, +} + +func (x Datum_DatumType) Enum() *Datum_DatumType { + p := new(Datum_DatumType) + *p = x + return p +} +func (x Datum_DatumType) String() string { + return proto.EnumName(Datum_DatumType_name, int32(x)) +} +func (x Datum_DatumType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Datum_DatumType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Datum_DatumType_value, data, "Datum_DatumType") + if err != nil { + return err + } + *x = Datum_DatumType(value) + return nil +} + +type Term_TermType int32 + +const ( + Term_DATUM Term_TermType = 1 + Term_MAKE_ARRAY Term_TermType = 2 + Term_MAKE_OBJ Term_TermType = 3 + Term_VAR Term_TermType = 10 + Term_JAVASCRIPT Term_TermType = 11 + Term_UUID Term_TermType = 169 + 
Term_HTTP Term_TermType = 153 + Term_ERROR Term_TermType = 12 + Term_IMPLICIT_VAR Term_TermType = 13 + Term_DB Term_TermType = 14 + Term_TABLE Term_TermType = 15 + Term_GET Term_TermType = 16 + Term_GET_ALL Term_TermType = 78 + Term_EQ Term_TermType = 17 + Term_NE Term_TermType = 18 + Term_LT Term_TermType = 19 + Term_LE Term_TermType = 20 + Term_GT Term_TermType = 21 + Term_GE Term_TermType = 22 + Term_NOT Term_TermType = 23 + Term_ADD Term_TermType = 24 + Term_SUB Term_TermType = 25 + Term_MUL Term_TermType = 26 + Term_DIV Term_TermType = 27 + Term_MOD Term_TermType = 28 + Term_APPEND Term_TermType = 29 + Term_PREPEND Term_TermType = 80 + Term_DIFFERENCE Term_TermType = 95 + Term_SET_INSERT Term_TermType = 88 + Term_SET_INTERSECTION Term_TermType = 89 + Term_SET_UNION Term_TermType = 90 + Term_SET_DIFFERENCE Term_TermType = 91 + Term_SLICE Term_TermType = 30 + Term_SKIP Term_TermType = 70 + Term_LIMIT Term_TermType = 71 + Term_OFFSETS_OF Term_TermType = 87 + Term_CONTAINS Term_TermType = 93 + Term_GET_FIELD Term_TermType = 31 + Term_KEYS Term_TermType = 94 + Term_OBJECT Term_TermType = 143 + Term_HAS_FIELDS Term_TermType = 32 + Term_WITH_FIELDS Term_TermType = 96 + Term_PLUCK Term_TermType = 33 + Term_WITHOUT Term_TermType = 34 + Term_MERGE Term_TermType = 35 + Term_BETWEEN_DEPRECATED Term_TermType = 36 + Term_BETWEEN Term_TermType = 182 + Term_REDUCE Term_TermType = 37 + Term_MAP Term_TermType = 38 + Term_FILTER Term_TermType = 39 + Term_CONCAT_MAP Term_TermType = 40 + Term_ORDER_BY Term_TermType = 41 + Term_DISTINCT Term_TermType = 42 + Term_COUNT Term_TermType = 43 + Term_IS_EMPTY Term_TermType = 86 + Term_UNION Term_TermType = 44 + Term_NTH Term_TermType = 45 + Term_BRACKET Term_TermType = 170 + Term_INNER_JOIN Term_TermType = 48 + Term_OUTER_JOIN Term_TermType = 49 + Term_EQ_JOIN Term_TermType = 50 + Term_ZIP Term_TermType = 72 + Term_RANGE Term_TermType = 173 + Term_INSERT_AT Term_TermType = 82 + Term_DELETE_AT Term_TermType = 83 + Term_CHANGE_AT 
Term_TermType = 84 + Term_SPLICE_AT Term_TermType = 85 + Term_COERCE_TO Term_TermType = 51 + Term_TYPE_OF Term_TermType = 52 + Term_UPDATE Term_TermType = 53 + Term_DELETE Term_TermType = 54 + Term_REPLACE Term_TermType = 55 + Term_INSERT Term_TermType = 56 + Term_DB_CREATE Term_TermType = 57 + Term_DB_DROP Term_TermType = 58 + Term_DB_LIST Term_TermType = 59 + Term_TABLE_CREATE Term_TermType = 60 + Term_TABLE_DROP Term_TermType = 61 + Term_TABLE_LIST Term_TermType = 62 + Term_CONFIG Term_TermType = 174 + Term_STATUS Term_TermType = 175 + Term_WAIT Term_TermType = 177 + Term_RECONFIGURE Term_TermType = 176 + Term_REBALANCE Term_TermType = 179 + Term_SYNC Term_TermType = 138 + Term_INDEX_CREATE Term_TermType = 75 + Term_INDEX_DROP Term_TermType = 76 + Term_INDEX_LIST Term_TermType = 77 + Term_INDEX_STATUS Term_TermType = 139 + Term_INDEX_WAIT Term_TermType = 140 + Term_INDEX_RENAME Term_TermType = 156 + Term_FUNCALL Term_TermType = 64 + Term_BRANCH Term_TermType = 65 + Term_OR Term_TermType = 66 + Term_AND Term_TermType = 67 + Term_FOR_EACH Term_TermType = 68 + Term_FUNC Term_TermType = 69 + Term_ASC Term_TermType = 73 + Term_DESC Term_TermType = 74 + Term_INFO Term_TermType = 79 + Term_MATCH Term_TermType = 97 + Term_UPCASE Term_TermType = 141 + Term_DOWNCASE Term_TermType = 142 + Term_SAMPLE Term_TermType = 81 + Term_DEFAULT Term_TermType = 92 + Term_JSON Term_TermType = 98 + Term_TO_JSON_STRING Term_TermType = 172 + Term_ISO8601 Term_TermType = 99 + Term_TO_ISO8601 Term_TermType = 100 + Term_EPOCH_TIME Term_TermType = 101 + Term_TO_EPOCH_TIME Term_TermType = 102 + Term_NOW Term_TermType = 103 + Term_IN_TIMEZONE Term_TermType = 104 + Term_DURING Term_TermType = 105 + Term_DATE Term_TermType = 106 + Term_TIME_OF_DAY Term_TermType = 126 + Term_TIMEZONE Term_TermType = 127 + Term_YEAR Term_TermType = 128 + Term_MONTH Term_TermType = 129 + Term_DAY Term_TermType = 130 + Term_DAY_OF_WEEK Term_TermType = 131 + Term_DAY_OF_YEAR Term_TermType = 132 + Term_HOURS 
Term_TermType = 133 + Term_MINUTES Term_TermType = 134 + Term_SECONDS Term_TermType = 135 + Term_TIME Term_TermType = 136 + Term_MONDAY Term_TermType = 107 + Term_TUESDAY Term_TermType = 108 + Term_WEDNESDAY Term_TermType = 109 + Term_THURSDAY Term_TermType = 110 + Term_FRIDAY Term_TermType = 111 + Term_SATURDAY Term_TermType = 112 + Term_SUNDAY Term_TermType = 113 + Term_JANUARY Term_TermType = 114 + Term_FEBRUARY Term_TermType = 115 + Term_MARCH Term_TermType = 116 + Term_APRIL Term_TermType = 117 + Term_MAY Term_TermType = 118 + Term_JUNE Term_TermType = 119 + Term_JULY Term_TermType = 120 + Term_AUGUST Term_TermType = 121 + Term_SEPTEMBER Term_TermType = 122 + Term_OCTOBER Term_TermType = 123 + Term_NOVEMBER Term_TermType = 124 + Term_DECEMBER Term_TermType = 125 + Term_LITERAL Term_TermType = 137 + Term_GROUP Term_TermType = 144 + Term_SUM Term_TermType = 145 + Term_AVG Term_TermType = 146 + Term_MIN Term_TermType = 147 + Term_MAX Term_TermType = 148 + Term_SPLIT Term_TermType = 149 + Term_UNGROUP Term_TermType = 150 + Term_RANDOM Term_TermType = 151 + Term_CHANGES Term_TermType = 152 + Term_ARGS Term_TermType = 154 + Term_BINARY Term_TermType = 155 + Term_GEOJSON Term_TermType = 157 + Term_TO_GEOJSON Term_TermType = 158 + Term_POINT Term_TermType = 159 + Term_LINE Term_TermType = 160 + Term_POLYGON Term_TermType = 161 + Term_DISTANCE Term_TermType = 162 + Term_INTERSECTS Term_TermType = 163 + Term_INCLUDES Term_TermType = 164 + Term_CIRCLE Term_TermType = 165 + Term_GET_INTERSECTING Term_TermType = 166 + Term_FILL Term_TermType = 167 + Term_GET_NEAREST Term_TermType = 168 + Term_POLYGON_SUB Term_TermType = 171 + Term_MINVAL Term_TermType = 180 + Term_MAXVAL Term_TermType = 181 +) + +var Term_TermType_name = map[int32]string{ + 1: "DATUM", + 2: "MAKE_ARRAY", + 3: "MAKE_OBJ", + 10: "VAR", + 11: "JAVASCRIPT", + 169: "UUID", + 153: "HTTP", + 12: "ERROR", + 13: "IMPLICIT_VAR", + 14: "DB", + 15: "TABLE", + 16: "GET", + 78: "GET_ALL", + 17: "EQ", + 18: "NE", + 19: 
"LT", + 20: "LE", + 21: "GT", + 22: "GE", + 23: "NOT", + 24: "ADD", + 25: "SUB", + 26: "MUL", + 27: "DIV", + 28: "MOD", + 29: "APPEND", + 80: "PREPEND", + 95: "DIFFERENCE", + 88: "SET_INSERT", + 89: "SET_INTERSECTION", + 90: "SET_UNION", + 91: "SET_DIFFERENCE", + 30: "SLICE", + 70: "SKIP", + 71: "LIMIT", + 87: "OFFSETS_OF", + 93: "CONTAINS", + 31: "GET_FIELD", + 94: "KEYS", + 143: "OBJECT", + 32: "HAS_FIELDS", + 96: "WITH_FIELDS", + 33: "PLUCK", + 34: "WITHOUT", + 35: "MERGE", + 36: "BETWEEN_DEPRECATED", + 182: "BETWEEN", + 37: "REDUCE", + 38: "MAP", + 39: "FILTER", + 40: "CONCAT_MAP", + 41: "ORDER_BY", + 42: "DISTINCT", + 43: "COUNT", + 86: "IS_EMPTY", + 44: "UNION", + 45: "NTH", + 170: "BRACKET", + 48: "INNER_JOIN", + 49: "OUTER_JOIN", + 50: "EQ_JOIN", + 72: "ZIP", + 173: "RANGE", + 82: "INSERT_AT", + 83: "DELETE_AT", + 84: "CHANGE_AT", + 85: "SPLICE_AT", + 51: "COERCE_TO", + 52: "TYPE_OF", + 53: "UPDATE", + 54: "DELETE", + 55: "REPLACE", + 56: "INSERT", + 57: "DB_CREATE", + 58: "DB_DROP", + 59: "DB_LIST", + 60: "TABLE_CREATE", + 61: "TABLE_DROP", + 62: "TABLE_LIST", + 174: "CONFIG", + 175: "STATUS", + 177: "WAIT", + 176: "RECONFIGURE", + 179: "REBALANCE", + 138: "SYNC", + 75: "INDEX_CREATE", + 76: "INDEX_DROP", + 77: "INDEX_LIST", + 139: "INDEX_STATUS", + 140: "INDEX_WAIT", + 156: "INDEX_RENAME", + 64: "FUNCALL", + 65: "BRANCH", + 66: "OR", + 67: "AND", + 68: "FOR_EACH", + 69: "FUNC", + 73: "ASC", + 74: "DESC", + 79: "INFO", + 97: "MATCH", + 141: "UPCASE", + 142: "DOWNCASE", + 81: "SAMPLE", + 92: "DEFAULT", + 98: "JSON", + 172: "TO_JSON_STRING", + 99: "ISO8601", + 100: "TO_ISO8601", + 101: "EPOCH_TIME", + 102: "TO_EPOCH_TIME", + 103: "NOW", + 104: "IN_TIMEZONE", + 105: "DURING", + 106: "DATE", + 126: "TIME_OF_DAY", + 127: "TIMEZONE", + 128: "YEAR", + 129: "MONTH", + 130: "DAY", + 131: "DAY_OF_WEEK", + 132: "DAY_OF_YEAR", + 133: "HOURS", + 134: "MINUTES", + 135: "SECONDS", + 136: "TIME", + 107: "MONDAY", + 108: "TUESDAY", + 109: "WEDNESDAY", + 110: "THURSDAY", + 
111: "FRIDAY", + 112: "SATURDAY", + 113: "SUNDAY", + 114: "JANUARY", + 115: "FEBRUARY", + 116: "MARCH", + 117: "APRIL", + 118: "MAY", + 119: "JUNE", + 120: "JULY", + 121: "AUGUST", + 122: "SEPTEMBER", + 123: "OCTOBER", + 124: "NOVEMBER", + 125: "DECEMBER", + 137: "LITERAL", + 144: "GROUP", + 145: "SUM", + 146: "AVG", + 147: "MIN", + 148: "MAX", + 149: "SPLIT", + 150: "UNGROUP", + 151: "RANDOM", + 152: "CHANGES", + 154: "ARGS", + 155: "BINARY", + 157: "GEOJSON", + 158: "TO_GEOJSON", + 159: "POINT", + 160: "LINE", + 161: "POLYGON", + 162: "DISTANCE", + 163: "INTERSECTS", + 164: "INCLUDES", + 165: "CIRCLE", + 166: "GET_INTERSECTING", + 167: "FILL", + 168: "GET_NEAREST", + 171: "POLYGON_SUB", + 180: "MINVAL", + 181: "MAXVAL", +} +var Term_TermType_value = map[string]int32{ + "DATUM": 1, + "MAKE_ARRAY": 2, + "MAKE_OBJ": 3, + "VAR": 10, + "JAVASCRIPT": 11, + "UUID": 169, + "HTTP": 153, + "ERROR": 12, + "IMPLICIT_VAR": 13, + "DB": 14, + "TABLE": 15, + "GET": 16, + "GET_ALL": 78, + "EQ": 17, + "NE": 18, + "LT": 19, + "LE": 20, + "GT": 21, + "GE": 22, + "NOT": 23, + "ADD": 24, + "SUB": 25, + "MUL": 26, + "DIV": 27, + "MOD": 28, + "APPEND": 29, + "PREPEND": 80, + "DIFFERENCE": 95, + "SET_INSERT": 88, + "SET_INTERSECTION": 89, + "SET_UNION": 90, + "SET_DIFFERENCE": 91, + "SLICE": 30, + "SKIP": 70, + "LIMIT": 71, + "OFFSETS_OF": 87, + "CONTAINS": 93, + "GET_FIELD": 31, + "KEYS": 94, + "OBJECT": 143, + "HAS_FIELDS": 32, + "WITH_FIELDS": 96, + "PLUCK": 33, + "WITHOUT": 34, + "MERGE": 35, + "BETWEEN_DEPRECATED": 36, + "BETWEEN": 182, + "REDUCE": 37, + "MAP": 38, + "FILTER": 39, + "CONCAT_MAP": 40, + "ORDER_BY": 41, + "DISTINCT": 42, + "COUNT": 43, + "IS_EMPTY": 86, + "UNION": 44, + "NTH": 45, + "BRACKET": 170, + "INNER_JOIN": 48, + "OUTER_JOIN": 49, + "EQ_JOIN": 50, + "ZIP": 72, + "RANGE": 173, + "INSERT_AT": 82, + "DELETE_AT": 83, + "CHANGE_AT": 84, + "SPLICE_AT": 85, + "COERCE_TO": 51, + "TYPE_OF": 52, + "UPDATE": 53, + "DELETE": 54, + "REPLACE": 55, + "INSERT": 56, + 
"DB_CREATE": 57, + "DB_DROP": 58, + "DB_LIST": 59, + "TABLE_CREATE": 60, + "TABLE_DROP": 61, + "TABLE_LIST": 62, + "CONFIG": 174, + "STATUS": 175, + "WAIT": 177, + "RECONFIGURE": 176, + "REBALANCE": 179, + "SYNC": 138, + "INDEX_CREATE": 75, + "INDEX_DROP": 76, + "INDEX_LIST": 77, + "INDEX_STATUS": 139, + "INDEX_WAIT": 140, + "INDEX_RENAME": 156, + "FUNCALL": 64, + "BRANCH": 65, + "OR": 66, + "AND": 67, + "FOR_EACH": 68, + "FUNC": 69, + "ASC": 73, + "DESC": 74, + "INFO": 79, + "MATCH": 97, + "UPCASE": 141, + "DOWNCASE": 142, + "SAMPLE": 81, + "DEFAULT": 92, + "JSON": 98, + "TO_JSON_STRING": 172, + "ISO8601": 99, + "TO_ISO8601": 100, + "EPOCH_TIME": 101, + "TO_EPOCH_TIME": 102, + "NOW": 103, + "IN_TIMEZONE": 104, + "DURING": 105, + "DATE": 106, + "TIME_OF_DAY": 126, + "TIMEZONE": 127, + "YEAR": 128, + "MONTH": 129, + "DAY": 130, + "DAY_OF_WEEK": 131, + "DAY_OF_YEAR": 132, + "HOURS": 133, + "MINUTES": 134, + "SECONDS": 135, + "TIME": 136, + "MONDAY": 107, + "TUESDAY": 108, + "WEDNESDAY": 109, + "THURSDAY": 110, + "FRIDAY": 111, + "SATURDAY": 112, + "SUNDAY": 113, + "JANUARY": 114, + "FEBRUARY": 115, + "MARCH": 116, + "APRIL": 117, + "MAY": 118, + "JUNE": 119, + "JULY": 120, + "AUGUST": 121, + "SEPTEMBER": 122, + "OCTOBER": 123, + "NOVEMBER": 124, + "DECEMBER": 125, + "LITERAL": 137, + "GROUP": 144, + "SUM": 145, + "AVG": 146, + "MIN": 147, + "MAX": 148, + "SPLIT": 149, + "UNGROUP": 150, + "RANDOM": 151, + "CHANGES": 152, + "ARGS": 154, + "BINARY": 155, + "GEOJSON": 157, + "TO_GEOJSON": 158, + "POINT": 159, + "LINE": 160, + "POLYGON": 161, + "DISTANCE": 162, + "INTERSECTS": 163, + "INCLUDES": 164, + "CIRCLE": 165, + "GET_INTERSECTING": 166, + "FILL": 167, + "GET_NEAREST": 168, + "POLYGON_SUB": 171, + "MINVAL": 180, + "MAXVAL": 181, +} + +func (x Term_TermType) Enum() *Term_TermType { + p := new(Term_TermType) + *p = x + return p +} +func (x Term_TermType) String() string { + return proto.EnumName(Term_TermType_name, int32(x)) +} +func (x Term_TermType) MarshalJSON() 
([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Term_TermType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Term_TermType_value, data, "Term_TermType") + if err != nil { + return err + } + *x = Term_TermType(value) + return nil +} + +type VersionDummy struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VersionDummy) Reset() { *m = VersionDummy{} } +func (m *VersionDummy) String() string { return proto.CompactTextString(m) } +func (*VersionDummy) ProtoMessage() {} + +type Query struct { + Type *Query_QueryType `protobuf:"varint,1,opt,name=type,enum=Query_QueryType" json:"type,omitempty"` + Query *Term `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + Token *int64 `protobuf:"varint,3,opt,name=token" json:"token,omitempty"` + OBSOLETENoreply *bool `protobuf:"varint,4,opt,name=OBSOLETE_noreply,def=0" json:"OBSOLETE_noreply,omitempty"` + AcceptsRJson *bool `protobuf:"varint,5,opt,name=accepts_r_json,def=0" json:"accepts_r_json,omitempty"` + GlobalOptargs []*Query_AssocPair `protobuf:"bytes,6,rep,name=global_optargs" json:"global_optargs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_OBSOLETENoreply bool = false +const Default_Query_AcceptsRJson bool = false + +func (m *Query) GetType() Query_QueryType { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func (m *Query) GetQuery() *Term { + if m != nil { + return m.Query + } + return nil +} + +func (m *Query) GetToken() int64 { + if m != nil && m.Token != nil { + return *m.Token + } + return 0 +} + +func (m *Query) GetOBSOLETENoreply() bool { + if m != nil && m.OBSOLETENoreply != nil { + return *m.OBSOLETENoreply + } + return Default_Query_OBSOLETENoreply +} + +func (m *Query) GetAcceptsRJson() bool { + if m != nil && m.AcceptsRJson != nil { + return 
*m.AcceptsRJson + } + return Default_Query_AcceptsRJson +} + +func (m *Query) GetGlobalOptargs() []*Query_AssocPair { + if m != nil { + return m.GlobalOptargs + } + return nil +} + +type Query_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_AssocPair) Reset() { *m = Query_AssocPair{} } +func (m *Query_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Query_AssocPair) ProtoMessage() {} + +func (m *Query_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Query_AssocPair) GetVal() *Term { + if m != nil { + return m.Val + } + return nil +} + +type Frame struct { + Type *Frame_FrameType `protobuf:"varint,1,opt,name=type,enum=Frame_FrameType" json:"type,omitempty"` + Pos *int64 `protobuf:"varint,2,opt,name=pos" json:"pos,omitempty"` + Opt *string `protobuf:"bytes,3,opt,name=opt" json:"opt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Frame) Reset() { *m = Frame{} } +func (m *Frame) String() string { return proto.CompactTextString(m) } +func (*Frame) ProtoMessage() {} + +func (m *Frame) GetType() Frame_FrameType { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func (m *Frame) GetPos() int64 { + if m != nil && m.Pos != nil { + return *m.Pos + } + return 0 +} + +func (m *Frame) GetOpt() string { + if m != nil && m.Opt != nil { + return *m.Opt + } + return "" +} + +type Backtrace struct { + Frames []*Frame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Backtrace) Reset() { *m = Backtrace{} } +func (m *Backtrace) String() string { return proto.CompactTextString(m) } +func (*Backtrace) ProtoMessage() {} + +func (m *Backtrace) GetFrames() []*Frame { + if m != nil { + return m.Frames + } + return nil +} + +type Response struct { + 
Type *Response_ResponseType `protobuf:"varint,1,opt,name=type,enum=Response_ResponseType" json:"type,omitempty"` + Notes []Response_ResponseNote `protobuf:"varint,6,rep,name=notes,enum=Response_ResponseNote" json:"notes,omitempty"` + Token *int64 `protobuf:"varint,2,opt,name=token" json:"token,omitempty"` + Response []*Datum `protobuf:"bytes,3,rep,name=response" json:"response,omitempty"` + Backtrace *Backtrace `protobuf:"bytes,4,opt,name=backtrace" json:"backtrace,omitempty"` + Profile *Datum `protobuf:"bytes,5,opt,name=profile" json:"profile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetType() Response_ResponseType { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func (m *Response) GetNotes() []Response_ResponseNote { + if m != nil { + return m.Notes + } + return nil +} + +func (m *Response) GetToken() int64 { + if m != nil && m.Token != nil { + return *m.Token + } + return 0 +} + +func (m *Response) GetResponse() []*Datum { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetBacktrace() *Backtrace { + if m != nil { + return m.Backtrace + } + return nil +} + +func (m *Response) GetProfile() *Datum { + if m != nil { + return m.Profile + } + return nil +} + +type Datum struct { + Type *Datum_DatumType `protobuf:"varint,1,opt,name=type,enum=Datum_DatumType" json:"type,omitempty"` + RBool *bool `protobuf:"varint,2,opt,name=r_bool" json:"r_bool,omitempty"` + RNum *float64 `protobuf:"fixed64,3,opt,name=r_num" json:"r_num,omitempty"` + RStr *string `protobuf:"bytes,4,opt,name=r_str" json:"r_str,omitempty"` + RArray []*Datum `protobuf:"bytes,5,rep,name=r_array" json:"r_array,omitempty"` + RObject []*Datum_AssocPair `protobuf:"bytes,6,rep,name=r_object" json:"r_object,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` 
+ XXX_unrecognized []byte `json:"-"` +} + +func (m *Datum) Reset() { *m = Datum{} } +func (m *Datum) String() string { return proto.CompactTextString(m) } +func (*Datum) ProtoMessage() {} + +var extRange_Datum = []proto.ExtensionRange{ + {10000, 20000}, +} + +func (*Datum) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Datum +} +func (m *Datum) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *Datum) GetType() Datum_DatumType { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func (m *Datum) GetRBool() bool { + if m != nil && m.RBool != nil { + return *m.RBool + } + return false +} + +func (m *Datum) GetRNum() float64 { + if m != nil && m.RNum != nil { + return *m.RNum + } + return 0 +} + +func (m *Datum) GetRStr() string { + if m != nil && m.RStr != nil { + return *m.RStr + } + return "" +} + +func (m *Datum) GetRArray() []*Datum { + if m != nil { + return m.RArray + } + return nil +} + +func (m *Datum) GetRObject() []*Datum_AssocPair { + if m != nil { + return m.RObject + } + return nil +} + +type Datum_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Datum `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Datum_AssocPair) Reset() { *m = Datum_AssocPair{} } +func (m *Datum_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Datum_AssocPair) ProtoMessage() {} + +func (m *Datum_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Datum_AssocPair) GetVal() *Datum { + if m != nil { + return m.Val + } + return nil +} + +type Term struct { + Type *Term_TermType `protobuf:"varint,1,opt,name=type,enum=Term_TermType" json:"type,omitempty"` + Datum *Datum `protobuf:"bytes,2,opt,name=datum" json:"datum,omitempty"` + Args []*Term 
`protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + Optargs []*Term_AssocPair `protobuf:"bytes,4,rep,name=optargs" json:"optargs,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Term) Reset() { *m = Term{} } +func (m *Term) String() string { return proto.CompactTextString(m) } +func (*Term) ProtoMessage() {} + +var extRange_Term = []proto.ExtensionRange{ + {10000, 20000}, +} + +func (*Term) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Term +} +func (m *Term) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *Term) GetType() Term_TermType { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func (m *Term) GetDatum() *Datum { + if m != nil { + return m.Datum + } + return nil +} + +func (m *Term) GetArgs() []*Term { + if m != nil { + return m.Args + } + return nil +} + +func (m *Term) GetOptargs() []*Term_AssocPair { + if m != nil { + return m.Optargs + } + return nil +} + +type Term_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Term_AssocPair) Reset() { *m = Term_AssocPair{} } +func (m *Term_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Term_AssocPair) ProtoMessage() {} + +func (m *Term_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Term_AssocPair) GetVal() *Term { + if m != nil { + return m.Val + } + return nil +} + +func init() { + proto.RegisterEnum("VersionDummy_Version", VersionDummy_Version_name, VersionDummy_Version_value) + proto.RegisterEnum("VersionDummy_Protocol", VersionDummy_Protocol_name, VersionDummy_Protocol_value) + proto.RegisterEnum("Query_QueryType", 
Query_QueryType_name, Query_QueryType_value) + proto.RegisterEnum("Frame_FrameType", Frame_FrameType_name, Frame_FrameType_value) + proto.RegisterEnum("Response_ResponseType", Response_ResponseType_name, Response_ResponseType_value) + proto.RegisterEnum("Response_ResponseNote", Response_ResponseNote_name, Response_ResponseNote_value) + proto.RegisterEnum("Datum_DatumType", Datum_DatumType_name, Datum_DatumType_value) + proto.RegisterEnum("Term_TermType", Term_TermType_name, Term_TermType_value) +} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto new file mode 100644 index 000000000..3ad50a2dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto @@ -0,0 +1,805 @@ +//////////////////////////////////////////////////////////////////////////////// +// THE HIGH-LEVEL VIEW // +//////////////////////////////////////////////////////////////////////////////// + +// Process: When you first open a connection, send the magic number +// for the version of the protobuf you're targeting (in the [Version] +// enum). This should **NOT** be sent as a protobuf; just send the +// little-endian 32-bit integer over the wire raw. This number should +// only be sent once per connection. + +// The magic number shall be followed by an authorization key. The +// first 4 bytes are the length of the key to be sent as a little-endian +// 32-bit integer, followed by the key string. Even if there is no key, +// an empty string should be sent (length 0 and no data). + +// Following the authorization key, the client shall send a magic number +// for the communication protocol they want to use (in the [Protocol] +// enum). This shall be a little-endian 32-bit integer. + +// The server will then respond with a NULL-terminated string response. +// "SUCCESS" indicates that the connection has been accepted. 
Any other +// response indicates an error, and the response string should describe +// the error. + +// Next, for each query you want to send, construct a [Query] protobuf +// and serialize it to a binary blob. Send the blob's size to the +// server encoded as a little-endian 32-bit integer, followed by the +// blob itself. You will recieve a [Response] protobuf back preceded +// by its own size, once again encoded as a little-endian 32-bit +// integer. You can see an example exchange below in **EXAMPLE**. + +// A query consists of a [Term] to evaluate and a unique-per-connection +// [token]. + +// Tokens are used for two things: +// * Keeping track of which responses correspond to which queries. +// * Batched queries. Some queries return lots of results, so we send back +// batches of <1000, and you need to send a [CONTINUE] query with the same +// token to get more results from the original query. +//////////////////////////////////////////////////////////////////////////////// + +message VersionDummy { // We need to wrap it like this for some + // non-conforming protobuf libraries + // This enum contains the magic numbers for your version. See **THE HIGH-LEVEL + // VIEW** for what to do with it. + enum Version { + V0_1 = 0x3f61ba36; + V0_2 = 0x723081e1; // Authorization key during handshake + V0_3 = 0x5f75e83e; // Authorization key and protocol during handshake + V0_4 = 0x400c2d20; // Queries execute in parallel + } + + // The protocol to use after the handshake, specified in V0_3 + enum Protocol { + PROTOBUF = 0x271ffc41; + JSON = 0x7e6970c7; + } +} + +// You send one of: +// * A [START] query with a [Term] to evaluate and a unique-per-connection token. +// * A [CONTINUE] query with the same token as a [START] query that returned +// [SUCCESS_PARTIAL] in its [Response]. +// * A [STOP] query with the same token as a [START] query that you want to stop. +// * A [NOREPLY_WAIT] query with a unique per-connection token. 
The server answers +// with a [WAIT_COMPLETE] [Response]. +message Query { + enum QueryType { + START = 1; // Start a new query. + CONTINUE = 2; // Continue a query that returned [SUCCESS_PARTIAL] + // (see [Response]). + STOP = 3; // Stop a query partway through executing. + NOREPLY_WAIT = 4; + // Wait for noreply operations to finish. + } + optional QueryType type = 1; + // A [Term] is how we represent the operations we want a query to perform. + optional Term query = 2; // only present when [type] = [START] + optional int64 token = 3; + // This flag is ignored on the server. `noreply` should be added + // to `global_optargs` instead (the key "noreply" should map to + // either true or false). + optional bool OBSOLETE_noreply = 4 [default = false]; + + // If this is set to [true], then [Datum] values will sometimes be + // of [DatumType] [R_JSON] (see below). This can provide enormous + // speedups in languages with poor protobuf libraries. + optional bool accepts_r_json = 5 [default = false]; + + message AssocPair { + optional string key = 1; + optional Term val = 2; + } + repeated AssocPair global_optargs = 6; +} + +// A backtrace frame (see `backtrace` in Response below) +message Frame { + enum FrameType { + POS = 1; // Error occured in a positional argument. + OPT = 2; // Error occured in an optional argument. + } + optional FrameType type = 1; + optional int64 pos = 2; // The index of the positional argument. + optional string opt = 3; // The name of the optional argument. +} +message Backtrace { + repeated Frame frames = 1; +} + +// You get back a response with the same [token] as your query. +message Response { + enum ResponseType { + // These response types indicate success. + SUCCESS_ATOM = 1; // Query returned a single RQL datatype. + SUCCESS_SEQUENCE = 2; // Query returned a sequence of RQL datatypes. + SUCCESS_PARTIAL = 3; // Query returned a partial sequence of RQL + // datatypes. 
If you send a [CONTINUE] query with + // the same token as this response, you will get + // more of the sequence. Keep sending [CONTINUE] + // queries until you get back [SUCCESS_SEQUENCE]. + WAIT_COMPLETE = 4; // A [NOREPLY_WAIT] query completed. + + // These response types indicate failure. + CLIENT_ERROR = 16; // Means the client is buggy. An example is if the + // client sends a malformed protobuf, or tries to + // send [CONTINUE] for an unknown token. + COMPILE_ERROR = 17; // Means the query failed during parsing or type + // checking. For example, if you pass too many + // arguments to a function. + RUNTIME_ERROR = 18; // Means the query failed at runtime. An example is + // if you add together two values from a table, but + // they turn out at runtime to be booleans rather + // than numbers. + } + optional ResponseType type = 1; + + // ResponseNotes are used to provide information about the query + // response that may be useful for people writing drivers or ORMs. + // Currently all the notes we send indicate that a stream has certain + // special properties. + enum ResponseNote { + // The stream is a changefeed stream (e.g. `r.table('test').changes()`). + SEQUENCE_FEED = 1; + // The stream is a point changefeed stream + // (e.g. `r.table('test').get(0).changes()`). + ATOM_FEED = 2; + // The stream is an order_by_limit changefeed stream + // (e.g. `r.table('test').order_by(index: 'id').limit(5).changes()`). + ORDER_BY_LIMIT_FEED = 3; + // The stream is a union of multiple changefeed types that can't be + // collapsed to a single type + // (e.g. `r.table('test').changes().union(r.table('test').get(0).changes())`). + UNIONED_FEED = 4; + // The stream is a changefeed stream and includes notes on what state + // the changefeed stream is in (e.g. objects of the form `{state: + // 'initializing'}`). + INCLUDES_STATES = 5; + } + repeated ResponseNote notes = 6; + + optional int64 token = 2; // Indicates what [Query] this response corresponds to. 
+ + // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM], or many RQL + // data if [type] is [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL]. It contains 1 + // error message (of type [R_STR]) in all other cases. + repeated Datum response = 3; + + // If [type] is [CLIENT_ERROR], [TYPE_ERROR], or [RUNTIME_ERROR], then a + // backtrace will be provided. The backtrace says where in the query the + // error occured. Ideally this information will be presented to the user as + // a pretty-printed version of their query with the erroneous section + // underlined. A backtrace is a series of 0 or more [Frame]s, each of which + // specifies either the index of a positional argument or the name of an + // optional argument. (Those words will make more sense if you look at the + // [Term] message below.) + optional Backtrace backtrace = 4; // Contains n [Frame]s when you get back an error. + + // If the [global_optargs] in the [Query] that this [Response] is a + // response to contains a key "profile" which maps to a static value of + // true then [profile] will contain a [Datum] which provides profiling + // information about the execution of the query. This field should be + // returned to the user along with the result that would normally be + // returned (a datum or a cursor). In official drivers this is accomplished + // by putting them inside of an object with "value" mapping to the return + // value and "profile" mapping to the profile object. + optional Datum profile = 5; +} + +// A [Datum] is a chunk of data that can be serialized to disk or returned to +// the user in a Response. Currently we only support JSON types, but we may +// support other types in the future (e.g., a date type or an integer type). +message Datum { + enum DatumType { + R_NULL = 1; + R_BOOL = 2; + R_NUM = 3; // a double + R_STR = 4; + R_ARRAY = 5; + R_OBJECT = 6; + // This [DatumType] will only be used if [accepts_r_json] is + // set to [true] in [Query]. 
[r_str] will be filled with a + // JSON encoding of the [Datum]. + R_JSON = 7; // uses r_str + } + optional DatumType type = 1; + optional bool r_bool = 2; + optional double r_num = 3; + optional string r_str = 4; + + repeated Datum r_array = 5; + message AssocPair { + optional string key = 1; + optional Datum val = 2; + } + repeated AssocPair r_object = 6; + + extensions 10000 to 20000; +} + +// A [Term] is either a piece of data (see **Datum** above), or an operator and +// its operands. If you have a [Datum], it's stored in the member [datum]. If +// you have an operator, its positional arguments are stored in [args] and its +// optional arguments are stored in [optargs]. +// +// A note about type signatures: +// We use the following notation to denote types: +// arg1_type, arg2_type, argrest_type... -> result_type +// So, for example, if we have a function `avg` that takes any number of +// arguments and averages them, we might write: +// NUMBER... -> NUMBER +// Or if we had a function that took one number modulo another: +// NUMBER, NUMBER -> NUMBER +// Or a function that takes a table and a primary key of any Datum type, then +// retrieves the entry with that primary key: +// Table, DATUM -> OBJECT +// Some arguments must be provided as literal values (and not the results of sub +// terms). These are marked with a `!`. +// Optional arguments are specified within curly braces as argname `:` value +// type (e.x `{use_outdated:BOOL}`) +// Many RQL operations are polymorphic. For these, alterantive type signatures +// are separated by `|`. +// +// The RQL type hierarchy is as follows: +// Top +// DATUM +// NULL +// BOOL +// NUMBER +// STRING +// OBJECT +// SingleSelection +// ARRAY +// Sequence +// ARRAY +// Stream +// StreamSelection +// Table +// Database +// Function +// Ordering - used only by ORDER_BY +// Pathspec -- an object, string, or array that specifies a path +// Error +message Term { + enum TermType { + // A RQL datum, stored in `datum` below. 
+ DATUM = 1; + + MAKE_ARRAY = 2; // DATUM... -> ARRAY + // Evaluate the terms in [optargs] and make an object + MAKE_OBJ = 3; // {...} -> OBJECT + + // * Compound types + + // Takes an integer representing a variable and returns the value stored + // in that variable. It's the responsibility of the client to translate + // from their local representation of a variable to a unique _non-negative_ + // integer for that variable. (We do it this way instead of letting + // clients provide variable names as strings to discourage + // variable-capturing client libraries, and because it's more efficient + // on the wire.) + VAR = 10; // !NUMBER -> DATUM + // Takes some javascript code and executes it. + JAVASCRIPT = 11; // STRING {timeout: !NUMBER} -> DATUM | + // STRING {timeout: !NUMBER} -> Function(*) + UUID = 169; // () -> DATUM + + // Takes an HTTP URL and gets it. If the get succeeds and + // returns valid JSON, it is converted into a DATUM + HTTP = 153; // STRING {data: OBJECT | STRING, + // timeout: !NUMBER, + // method: STRING, + // params: OBJECT, + // header: OBJECT | ARRAY, + // attempts: NUMBER, + // redirects: NUMBER, + // verify: BOOL, + // page: FUNC | STRING, + // page_limit: NUMBER, + // auth: OBJECT, + // result_format: STRING, + // } -> STRING | STREAM + + // Takes a string and throws an error with that message. + // Inside of a `default` block, you can omit the first + // argument to rethrow whatever error you catch (this is most + // useful as an argument to the `default` filter optarg). + ERROR = 12; // STRING -> Error | -> Error + // Takes nothing and returns a reference to the implicit variable. + IMPLICIT_VAR = 13; // -> DATUM + + // * Data Operators + // Returns a reference to a database. + DB = 14; // STRING -> Database + // Returns a reference to a table. 
+ TABLE = 15; // Database, STRING, {use_outdated:BOOL, identifier_format:STRING} -> Table + // STRING, {use_outdated:BOOL, identifier_format:STRING} -> Table + // Gets a single element from a table by its primary or a secondary key. + GET = 16; // Table, STRING -> SingleSelection | Table, NUMBER -> SingleSelection | + // Table, STRING -> NULL | Table, NUMBER -> NULL | + GET_ALL = 78; // Table, DATUM..., {index:!STRING} => ARRAY + + // Simple DATUM Ops + EQ = 17; // DATUM... -> BOOL + NE = 18; // DATUM... -> BOOL + LT = 19; // DATUM... -> BOOL + LE = 20; // DATUM... -> BOOL + GT = 21; // DATUM... -> BOOL + GE = 22; // DATUM... -> BOOL + NOT = 23; // BOOL -> BOOL + // ADD can either add two numbers or concatenate two arrays. + ADD = 24; // NUMBER... -> NUMBER | STRING... -> STRING + SUB = 25; // NUMBER... -> NUMBER + MUL = 26; // NUMBER... -> NUMBER + DIV = 27; // NUMBER... -> NUMBER + MOD = 28; // NUMBER, NUMBER -> NUMBER + + // DATUM Array Ops + // Append a single element to the end of an array (like `snoc`). + APPEND = 29; // ARRAY, DATUM -> ARRAY + // Prepend a single element to the end of an array (like `cons`). + PREPEND = 80; // ARRAY, DATUM -> ARRAY + //Remove the elements of one array from another array. + DIFFERENCE = 95; // ARRAY, ARRAY -> ARRAY + + // DATUM Set Ops + // Set ops work on arrays. They don't use actual sets and thus have + // performance characteristics you would expect from arrays rather than + // from sets. All set operations have the post condition that they + // array they return contains no duplicate values. 
+ SET_INSERT = 88; // ARRAY, DATUM -> ARRAY + SET_INTERSECTION = 89; // ARRAY, ARRAY -> ARRAY + SET_UNION = 90; // ARRAY, ARRAY -> ARRAY + SET_DIFFERENCE = 91; // ARRAY, ARRAY -> ARRAY + + SLICE = 30; // Sequence, NUMBER, NUMBER -> Sequence + SKIP = 70; // Sequence, NUMBER -> Sequence + LIMIT = 71; // Sequence, NUMBER -> Sequence + OFFSETS_OF = 87; // Sequence, DATUM -> Sequence | Sequence, Function(1) -> Sequence + CONTAINS = 93; // Sequence, DATUM -> BOOL | Sequence, Function(1) -> BOOL + + // Stream/Object Ops + // Get a particular field from an object, or map that over a + // sequence. + GET_FIELD = 31; // OBJECT, STRING -> DATUM + // | Sequence, STRING -> Sequence + // Return an array containing the keys of the object. + KEYS = 94; // OBJECT -> ARRAY + // Creates an object + OBJECT = 143; // STRING, DATUM, ... -> OBJECT + // Check whether an object contains all the specified fields, + // or filters a sequence so that all objects inside of it + // contain all the specified fields. + HAS_FIELDS = 32; // OBJECT, Pathspec... -> BOOL + // x.with_fields(...) <=> x.has_fields(...).pluck(...) + WITH_FIELDS = 96; // Sequence, Pathspec... -> Sequence + // Get a subset of an object by selecting some attributes to preserve, + // or map that over a sequence. (Both pick and pluck, polymorphic.) + PLUCK = 33; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT + // Get a subset of an object by selecting some attributes to discard, or + // map that over a sequence. (Both unpick and without, polymorphic.) + WITHOUT = 34; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT + // Merge objects (right-preferential) + MERGE = 35; // OBJECT... -> OBJECT | Sequence -> Sequence + + // Sequence Ops + // Get all elements of a sequence between two values. + // Half-open by default, but the openness of either side can be + // changed by passing 'closed' or 'open for `right_bound` or + // `left_bound`. 
+ BETWEEN_DEPRECATED = 36; // Deprecated version of between, which allows `null` to specify unboundedness + // With the newer version, clients should use `r.minval` and `r.maxval` for unboundedness + BETWEEN = 182; // StreamSelection, DATUM, DATUM, {index:!STRING, right_bound:STRING, left_bound:STRING} -> StreamSelection + REDUCE = 37; // Sequence, Function(2) -> DATUM + MAP = 38; // Sequence, Function(1) -> Sequence + // The arity of the function should be + // Sequence..., Function(sizeof...(Sequence)) -> Sequence + + // Filter a sequence with either a function or a shortcut + // object (see API docs for details). The body of FILTER is + // wrapped in an implicit `.default(false)`, and you can + // change the default value by specifying the `default` + // optarg. If you make the default `r.error`, all errors + // caught by `default` will be rethrown as if the `default` + // did not exist. + FILTER = 39; // Sequence, Function(1), {default:DATUM} -> Sequence | + // Sequence, OBJECT, {default:DATUM} -> Sequence + // Map a function over a sequence and then concatenate the results together. + CONCAT_MAP = 40; // Sequence, Function(1) -> Sequence + // Order a sequence based on one or more attributes. + ORDER_BY = 41; // Sequence, (!STRING | Ordering)... -> Sequence + // Get all distinct elements of a sequence (like `uniq`). + DISTINCT = 42; // Sequence -> Sequence + // Count the number of elements in a sequence, or only the elements that match + // a given filter. + COUNT = 43; // Sequence -> NUMBER | Sequence, DATUM -> NUMBER | Sequence, Function(1) -> NUMBER + IS_EMPTY = 86; // Sequence -> BOOL + // Take the union of multiple sequences (preserves duplicate elements! (use distinct)). + UNION = 44; // Sequence... -> Sequence + // Get the Nth element of a sequence. 
+ NTH = 45; // Sequence, NUMBER -> DATUM + // do NTH or GET_FIELD depending on target object + BRACKET = 170; // Sequence | OBJECT, NUMBER | STRING -> DATUM + // OBSOLETE_GROUPED_MAPREDUCE = 46; + // OBSOLETE_GROUPBY = 47; + + INNER_JOIN = 48; // Sequence, Sequence, Function(2) -> Sequence + OUTER_JOIN = 49; // Sequence, Sequence, Function(2) -> Sequence + // An inner-join that does an equality comparison on two attributes. + EQ_JOIN = 50; // Sequence, !STRING, Sequence, {index:!STRING} -> Sequence + ZIP = 72; // Sequence -> Sequence + RANGE = 173; // -> Sequence [0, +inf) + // NUMBER -> Sequence [0, a) + // NUMBER, NUMBER -> Sequence [a, b) + + // Array Ops + // Insert an element in to an array at a given index. + INSERT_AT = 82; // ARRAY, NUMBER, DATUM -> ARRAY + // Remove an element at a given index from an array. + DELETE_AT = 83; // ARRAY, NUMBER -> ARRAY | + // ARRAY, NUMBER, NUMBER -> ARRAY + // Change the element at a given index of an array. + CHANGE_AT = 84; // ARRAY, NUMBER, DATUM -> ARRAY + // Splice one array in to another array. + SPLICE_AT = 85; // ARRAY, NUMBER, ARRAY -> ARRAY + + // * Type Ops + // Coerces a datum to a named type (e.g. "bool"). + // If you previously used `stream_to_array`, you should use this instead + // with the type "array". + COERCE_TO = 51; // Top, STRING -> Top + // Returns the named type of a datum (e.g. TYPE_OF(true) = "BOOL") + TYPE_OF = 52; // Top -> STRING + + // * Write Ops (the OBJECTs contain data about number of errors etc.) + // Updates all the rows in a selection. Calls its Function with the row + // to be updated, and then merges the result of that call. 
+ UPDATE = 53; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | + // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | + // StreamSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | + // SingleSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT + // Deletes all the rows in a selection. + DELETE = 54; // StreamSelection, {durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection -> OBJECT + // Replaces all the rows in a selection. Calls its Function with the row + // to be replaced, and then discards it and stores the result of that + // call. + REPLACE = 55; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT + // Inserts into a table. If `conflict` is replace, overwrites + // entries with the same primary key. If `conflict` is + // update, does an update on the entry. If `conflict` is + // error, or is omitted, conflicts will trigger an error. + INSERT = 56; // Table, OBJECT, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT | Table, Sequence, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT + + // * Administrative OPs + // Creates a database with a particular name. + DB_CREATE = 57; // STRING -> OBJECT + // Drops a database with a particular name. + DB_DROP = 58; // STRING -> OBJECT + // Lists all the databases by name. (Takes no arguments) + DB_LIST = 59; // -> ARRAY + // Creates a table with a particular name in a particular + // database. (You may omit the first argument to use the + // default database.) 
+ TABLE_CREATE = 60; // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT + // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT + // STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT + // STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT + // Drops a table with a particular name from a particular + // database. (You may omit the first argument to use the + // default database.) + TABLE_DROP = 61; // Database, STRING -> OBJECT + // STRING -> OBJECT + // Lists all the tables in a particular database. (You may + // omit the first argument to use the default database.) + TABLE_LIST = 62; // Database -> ARRAY + // -> ARRAY + // Returns the row in the `rethinkdb.table_config` or `rethinkdb.db_config` table + // that corresponds to the given database or table. + CONFIG = 174; // Database -> SingleSelection + // Table -> SingleSelection + // Returns the row in the `rethinkdb.table_status` table that corresponds to the + // given table. + STATUS = 175; // Table -> SingleSelection + // Called on a table, waits for that table to be ready for read/write operations. + // Called on a database, waits for all of the tables in the database to be ready. + // Returns the corresponding row or rows from the `rethinkdb.table_status` table. 
+ WAIT = 177; // Table -> OBJECT + // Database -> OBJECT + // Generates a new config for the given table, or all tables in the given database + // The `shards` and `replicas` arguments are required + RECONFIGURE = 176; // Database, {shards:NUMBER, replicas:NUMBER[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT + // Database, {shards:NUMBER, replicas:OBJECT[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT + // Table, {shards:NUMBER, replicas:NUMBER[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT + // Table, {shards:NUMBER, replicas:OBJECT[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT + // Balances the table's shards but leaves everything else the same. Can also be + // applied to an entire database at once. + REBALANCE = 179; // Table -> OBJECT + // Database -> OBJECT + + // Ensures that previously issued soft-durability writes are complete and + // written to disk. + SYNC = 138; // Table -> OBJECT + + // * Secondary indexes OPs + // Creates a new secondary index with a particular name and definition. + INDEX_CREATE = 75; // Table, STRING, Function(1), {multi:BOOL} -> OBJECT + // Drops a secondary index with a particular name from the specified table. + INDEX_DROP = 76; // Table, STRING -> OBJECT + // Lists all secondary indexes on a particular table. + INDEX_LIST = 77; // Table -> ARRAY + // Gets information about whether or not a set of indexes are ready to + // be accessed. Returns a list of objects that look like this: + // {index:STRING, ready:BOOL[, blocks_processed:NUMBER, blocks_total:NUMBER]} + INDEX_STATUS = 139; // Table, STRING... -> ARRAY + // Blocks until a set of indexes are ready to be accessed. Returns the + // same values INDEX_STATUS. + INDEX_WAIT = 140; // Table, STRING... -> ARRAY + // Renames the given index to a new name + INDEX_RENAME = 156; // Table, STRING, STRING, {overwrite:BOOL} -> OBJECT + + // * Control Operators + // Calls a function on data + FUNCALL = 64; // Function(*), DATUM... 
-> DATUM + // Executes its first argument, and returns its second argument if it + // got [true] or its third argument if it got [false] (like an `if` + // statement). + BRANCH = 65; // BOOL, Top, Top -> Top + // Returns true if any of its arguments returns true (short-circuits). + OR = 66; // BOOL... -> BOOL + // Returns true if all of its arguments return true (short-circuits). + AND = 67; // BOOL... -> BOOL + // Calls its Function with each entry in the sequence + // and executes the array of terms that Function returns. + FOR_EACH = 68; // Sequence, Function(1) -> OBJECT + +//////////////////////////////////////////////////////////////////////////////// +////////// Special Terms +//////////////////////////////////////////////////////////////////////////////// + + // An anonymous function. Takes an array of numbers representing + // variables (see [VAR] above), and a [Term] to execute with those in + // scope. Returns a function that may be passed an array of arguments, + // then executes the Term with those bound to the variable names. The + // user will never construct this directly. We use it internally for + // things like `map` which take a function. The "arity" of a [Function] is + // the number of arguments it takes. + // For example, here's what `_X_.map{|x| x+2}` turns into: + // Term { + // type = MAP; + // args = [_X_, + // Term { + // type = Function; + // args = [Term { + // type = DATUM; + // datum = Datum { + // type = R_ARRAY; + // r_array = [Datum { type = R_NUM; r_num = 1; }]; + // }; + // }, + // Term { + // type = ADD; + // args = [Term { + // type = VAR; + // args = [Term { + // type = DATUM; + // datum = Datum { type = R_NUM; + // r_num = 1}; + // }]; + // }, + // Term { + // type = DATUM; + // datum = Datum { type = R_NUM; r_num = 2; }; + // }]; + // }]; + // }]; + FUNC = 69; // ARRAY, Top -> ARRAY -> Top + + // Indicates to ORDER_BY that this attribute is to be sorted in ascending order. 
+ ASC = 73; // !STRING -> Ordering + // Indicates to ORDER_BY that this attribute is to be sorted in descending order. + DESC = 74; // !STRING -> Ordering + + // Gets info about anything. INFO is most commonly called on tables. + INFO = 79; // Top -> OBJECT + + // `a.match(b)` returns a match object if the string `a` + // matches the regular expression `b`. + MATCH = 97; // STRING, STRING -> DATUM + + // Change the case of a string. + UPCASE = 141; // STRING -> STRING + DOWNCASE = 142; // STRING -> STRING + + // Select a number of elements from sequence with uniform distribution. + SAMPLE = 81; // Sequence, NUMBER -> Sequence + + // Evaluates its first argument. If that argument returns + // NULL or throws an error related to the absence of an + // expected value (for instance, accessing a non-existent + // field or adding NULL to an integer), DEFAULT will either + // return its second argument or execute it if it's a + // function. If the second argument is a function, it will be + // passed either the text of the error or NULL as its + // argument. + DEFAULT = 92; // Top, Top -> Top + + // Parses its first argument as a json string and returns it as a + // datum. + JSON = 98; // STRING -> DATUM + // Returns the datum as a JSON string. + // N.B.: we would really prefer this be named TO_JSON and that exists as + // an alias in Python and JavaScript drivers; however it conflicts with the + // standard `to_json` method defined by Ruby's standard json library. + TO_JSON_STRING = 172; // DATUM -> STRING + + // Parses its first arguments as an ISO 8601 time and returns it as a + // datum. + ISO8601 = 99; // STRING -> PSEUDOTYPE(TIME) + // Prints a time as an ISO 8601 time. + TO_ISO8601 = 100; // PSEUDOTYPE(TIME) -> STRING + + // Returns a time given seconds since epoch in UTC. + EPOCH_TIME = 101; // NUMBER -> PSEUDOTYPE(TIME) + // Returns seconds since epoch in UTC given a time. 
+ TO_EPOCH_TIME = 102; // PSEUDOTYPE(TIME) -> NUMBER + + // The time the query was received by the server. + NOW = 103; // -> PSEUDOTYPE(TIME) + // Puts a time into an ISO 8601 timezone. + IN_TIMEZONE = 104; // PSEUDOTYPE(TIME), STRING -> PSEUDOTYPE(TIME) + // a.during(b, c) returns whether a is in the range [b, c) + DURING = 105; // PSEUDOTYPE(TIME), PSEUDOTYPE(TIME), PSEUDOTYPE(TIME) -> BOOL + // Retrieves the date portion of a time. + DATE = 106; // PSEUDOTYPE(TIME) -> PSEUDOTYPE(TIME) + // x.time_of_day == x.date - x + TIME_OF_DAY = 126; // PSEUDOTYPE(TIME) -> NUMBER + // Returns the timezone of a time. + TIMEZONE = 127; // PSEUDOTYPE(TIME) -> STRING + + // These access the various components of a time. + YEAR = 128; // PSEUDOTYPE(TIME) -> NUMBER + MONTH = 129; // PSEUDOTYPE(TIME) -> NUMBER + DAY = 130; // PSEUDOTYPE(TIME) -> NUMBER + DAY_OF_WEEK = 131; // PSEUDOTYPE(TIME) -> NUMBER + DAY_OF_YEAR = 132; // PSEUDOTYPE(TIME) -> NUMBER + HOURS = 133; // PSEUDOTYPE(TIME) -> NUMBER + MINUTES = 134; // PSEUDOTYPE(TIME) -> NUMBER + SECONDS = 135; // PSEUDOTYPE(TIME) -> NUMBER + + // Construct a time from a date and optional timezone or a + // date+time and optional timezone. + TIME = 136; // NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | + // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | + + // Constants for ISO 8601 days of the week. + MONDAY = 107; // -> 1 + TUESDAY = 108; // -> 2 + WEDNESDAY = 109; // -> 3 + THURSDAY = 110; // -> 4 + FRIDAY = 111; // -> 5 + SATURDAY = 112; // -> 6 + SUNDAY = 113; // -> 7 + + // Constants for ISO 8601 months. + JANUARY = 114; // -> 1 + FEBRUARY = 115; // -> 2 + MARCH = 116; // -> 3 + APRIL = 117; // -> 4 + MAY = 118; // -> 5 + JUNE = 119; // -> 6 + JULY = 120; // -> 7 + AUGUST = 121; // -> 8 + SEPTEMBER = 122; // -> 9 + OCTOBER = 123; // -> 10 + NOVEMBER = 124; // -> 11 + DECEMBER = 125; // -> 12 + + // Indicates to MERGE to replace the other object rather than merge it. 
+ LITERAL = 137; // JSON -> Merging + + // SEQUENCE, STRING -> GROUPED_SEQUENCE | SEQUENCE, FUNCTION -> GROUPED_SEQUENCE + GROUP = 144; + SUM = 145; + AVG = 146; + MIN = 147; + MAX = 148; + + // `str.split()` splits on whitespace + // `str.split(" ")` splits on spaces only + // `str.split(" ", 5)` splits on spaces with at most 5 results + // `str.split(nil, 5)` splits on whitespace with at most 5 results + SPLIT = 149; // STRING -> ARRAY | STRING, STRING -> ARRAY | STRING, STRING, NUMBER -> ARRAY | STRING, NULL, NUMBER -> ARRAY + + UNGROUP = 150; // GROUPED_DATA -> ARRAY + + // Takes a range of numbers and returns a random number within the range + RANDOM = 151; // NUMBER, NUMBER {float:BOOL} -> DATUM + + CHANGES = 152; // TABLE -> STREAM + ARGS = 154; // ARRAY -> SPECIAL (used to splice arguments) + + // BINARY is client-only at the moment, it is not supported on the server + BINARY = 155; // STRING -> PSEUDOTYPE(BINARY) + + GEOJSON = 157; // OBJECT -> PSEUDOTYPE(GEOMETRY) + TO_GEOJSON = 158; // PSEUDOTYPE(GEOMETRY) -> OBJECT + POINT = 159; // NUMBER, NUMBER -> PSEUDOTYPE(GEOMETRY) + LINE = 160; // (ARRAY | PSEUDOTYPE(GEOMETRY))... -> PSEUDOTYPE(GEOMETRY) + POLYGON = 161; // (ARRAY | PSEUDOTYPE(GEOMETRY))... 
-> PSEUDOTYPE(GEOMETRY) + DISTANCE = 162; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) {geo_system:STRING, unit:STRING} -> NUMBER + INTERSECTS = 163; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL + INCLUDES = 164; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL + CIRCLE = 165; // PSEUDOTYPE(GEOMETRY), NUMBER {num_vertices:NUMBER, geo_system:STRING, unit:STRING, fill:BOOL} -> PSEUDOTYPE(GEOMETRY) + GET_INTERSECTING = 166; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING} -> StreamSelection + FILL = 167; // PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) + GET_NEAREST = 168; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING, max_results:NUM, max_dist:NUM, geo_system:STRING, unit:STRING} -> ARRAY + POLYGON_SUB = 171; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) + + // Constants for specifying key ranges + MINVAL = 180; + MAXVAL = 181; + } + optional TermType type = 1; + + // This is only used when type is DATUM. + optional Datum datum = 2; + + repeated Term args = 3; // Holds the positional arguments of the query. + message AssocPair { + optional string key = 1; + optional Term val = 2; + } + repeated AssocPair optargs = 4; // Holds the optional arguments of the query. + // (Note that the order of the optional arguments doesn't matter; think of a + // Hash.) 
+ + extensions 10000 to 20000; +} + +//////////////////////////////////////////////////////////////////////////////// +// EXAMPLE // +//////////////////////////////////////////////////////////////////////////////// +// ```ruby +// r.table('tbl', {:use_outdated => true}).insert([{:id => 0}, {:id => 1}]) +// ``` +// Would turn into: +// Term { +// type = INSERT; +// args = [Term { +// type = TABLE; +// args = [Term { +// type = DATUM; +// datum = Datum { type = R_STR; r_str = "tbl"; }; +// }]; +// optargs = [["use_outdated", +// Term { +// type = DATUM; +// datum = Datum { type = R_BOOL; r_bool = true; }; +// }]]; +// }, +// Term { +// type = MAKE_ARRAY; +// args = [Term { +// type = DATUM; +// datum = Datum { type = R_OBJECT; r_object = [["id", 0]]; }; +// }, +// Term { +// type = DATUM; +// datum = Datum { type = R_OBJECT; r_object = [["id", 1]]; }; +// }]; +// }] +// } +// And the server would reply: +// Response { +// type = SUCCESS_ATOM; +// token = 1; +// response = [Datum { type = R_OBJECT; r_object = [["inserted", 2]]; }]; +// } +// Or, if there were an error: +// Response { +// type = RUNTIME_ERROR; +// token = 1; +// response = [Datum { type = R_STR; r_str = "The table `tbl` doesn't exist!"; }]; +// backtrace = [Frame { type = POS; pos = 0; }, Frame { type = POS; pos = 0; }]; +// } diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go new file mode 100644 index 000000000..00ff80f0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go @@ -0,0 +1,225 @@ +package types + +import ( + "fmt" +) + +type Geometry struct { + Type string + Point Point + Line Line + Lines Lines +} + +func (g Geometry) MarshalRQL() (interface{}, error) { + switch g.Type { + case "Point": + return g.Point.MarshalRQL() + case "LineString": + return g.Line.MarshalRQL() + case "Polygon": + return g.Lines.MarshalRQL() + default: + return nil, 
fmt.Errorf("pseudo-type GEOMETRY object field 'type' %s is not valid", g.Type) + } +} + +func (g *Geometry) UnmarshalRQL(data interface{}) error { + if data, ok := data.(Geometry); ok { + g.Type = data.Type + g.Point = data.Point + g.Line = data.Line + g.Lines = data.Lines + + return nil + } + + m, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid") + } + + typ, ok := m["type"] + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'type' field") + } + coords, ok := m["coordinates"] + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'coordinates' field") + } + + var err error + switch typ { + case "Point": + g.Type = "Point" + g.Point, err = UnmarshalPoint(coords) + case "LineString": + g.Type = "LineString" + g.Line, err = UnmarshalLineString(coords) + case "Polygon": + g.Type = "Polygon" + g.Lines, err = UnmarshalPolygon(coords) + default: + return fmt.Errorf("pseudo-type GEOMETRY object has invalid type") + } + + if err != nil { + return err + } + + return nil +} + +type Point struct { + Lon float64 + Lat float64 +} +type Line []Point +type Lines []Line + +func (p Point) Coords() interface{} { + return []interface{}{p.Lon, p.Lat} +} + +func (p Point) MarshalRQL() (interface{}, error) { + return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": p.Coords(), + "type": "Point", + }, nil +} + +func (p *Point) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "Point" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Point") + } + + p.Lat = g.Point.Lat + p.Lon = g.Point.Lon + + return nil +} + +func (l Line) Coords() interface{} { + coords := make([]interface{}, len(l)) + for i, point := range l { + coords[i] = point.Coords() + } + return coords +} + +func (l Line) MarshalRQL() (interface{}, error) { + 
return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": l.Coords(), + "type": "LineString", + }, nil +} + +func (l *Line) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "LineString" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "LineString") + } + + *l = g.Line + + return nil +} + +func (l Lines) Coords() interface{} { + coords := make([]interface{}, len(l)) + for i, line := range l { + coords[i] = line.Coords() + } + return coords +} + +func (l Lines) MarshalRQL() (interface{}, error) { + return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": l.Coords(), + "type": "Polygon", + }, nil +} + +func (l *Lines) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "Polygon" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Polygon") + } + + *l = g.Lines + + return nil +} + +func UnmarshalPoint(v interface{}) (Point, error) { + coords, ok := v.([]interface{}) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + if len(coords) != 2 { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + lon, ok := coords[0].(float64) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + lat, ok := coords[1].(float64) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + return Point{ + Lon: lon, + Lat: lat, + }, nil +} + +func UnmarshalLineString(v interface{}) (Line, error) { + points, ok := v.([]interface{}) + if !ok { + return Line{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + var err error + line := make(Line, len(points)) + for i, 
coords := range points { + line[i], err = UnmarshalPoint(coords) + if err != nil { + return Line{}, err + } + } + return line, nil +} + +func UnmarshalPolygon(v interface{}) (Lines, error) { + lines, ok := v.([]interface{}) + if !ok { + return Lines{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + var err error + polygon := make(Lines, len(lines)) + for i, line := range lines { + polygon[i], err = UnmarshalLineString(line) + if err != nil { + return Lines{}, err + } + } + return polygon, nil +} diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md new file mode 100644 index 000000000..2d1b3d932 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md @@ -0,0 +1,34 @@ +circuit-breaker +=============== + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +The circuit-breaker resiliency pattern for golang. + +Creating a breaker takes three parameters: +- error threshold (for opening the breaker) +- success threshold (for closing the breaker) +- timeout (how long to keep the breaker open) + +```go +b := breaker.New(3, 1, 5*time.Second) + +for { + result := b.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! 
+ case breaker.ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } +} +``` diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 000000000..f88ca7248 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. +var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". From open, the +// breaker half-closes after "timeout". From half-open, the breaker closes +// after "successThreshold" consecutive successes, or opens on a single error. +func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { + return &Breaker{ + errorThreshold: errorThreshold, + successThreshold: successThreshold, + timeout: timeout, + } +} + +// Run will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function and pass along its return +// value. It is safe to call Run concurrently on the same Breaker. 
+func (b *Breaker) Run(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + return b.doWork(state, work) +} + +// Go will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function in a separate goroutine. +// If the function is run, Go will return nil immediately, and will *not* return +// the return value of the function. It is safe to call Go concurrently on the +// same Breaker. +func (b *Breaker) Go(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + // errcheck complains about ignoring the error return value, but + // that's on purpose; if you want an error from a goroutine you have to + // get it over a channel or something + go b.doWork(state, work) + + return nil +} + +func (b *Breaker) doWork(state uint32, work func() error) error { + var panicValue interface{} + + result := func() error { + defer func() { + panicValue = recover() + }() + return work() + }() + + if result == nil && panicValue == nil && state == closed { + // short-circuit the normal, success path without contending + // on the lock + return nil + } + + // oh well, I guess we have to contend on the lock + b.processResult(result, panicValue) + + if panicValue != nil { + // as close as Go lets us come to a "rethrow" although unfortunately + // we lose the original panicing location + panic(panicValue) + } + + return result +} + +func (b *Breaker) processResult(result error, panicValue interface{}) { + b.lock.Lock() + defer b.lock.Unlock() + + if result == nil && panicValue == nil { + if b.state == halfOpen { + b.successes++ + if b.successes == b.successThreshold { + b.closeBreaker() + } + } + } else { + if b.errors > 0 { + expiry := b.lastError.Add(b.timeout) + if time.Now().After(expiry) { + b.errors = 0 + } + } + + switch b.state { + case closed: + b.errors++ + if b.errors == b.errorThreshold { + 
b.openBreaker() + } else { + b.lastError = time.Now() + } + case halfOpen: + b.openBreaker() + } + } +} + +func (b *Breaker) openBreaker() { + b.changeState(open) + go b.timer() +} + +func (b *Breaker) closeBreaker() { + b.changeState(closed) +} + +func (b *Breaker) timer() { + time.Sleep(b.timeout) + + b.lock.Lock() + defer b.lock.Unlock() + + b.changeState(halfOpen) +} + +func (b *Breaker) changeState(newState uint32) { + b.errors = 0 + b.successes = 0 + atomic.StoreUint32(&b.state, newState) +} diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go new file mode 100644 index 000000000..b41308db6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go @@ -0,0 +1,196 @@ +package breaker + +import ( + "errors" + "testing" + "time" +) + +var errSomeError = errors.New("errSomeError") + +func alwaysPanics() error { + panic("foo") +} + +func returnsError() error { + return errSomeError +} + +func returnsSuccess() error { + return nil +} + +func TestBreakerErrorExpiry(t *testing.T) { + breaker := New(2, 1, 1*time.Second) + + for i := 0; i < 3; i++ { + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + time.Sleep(1 * time.Second) + } + + for i := 0; i < 3; i++ { + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + time.Sleep(1 * time.Second) + } +} + +func TestBreakerPanicsCountAsErrors(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + func() { + defer func() { + val := recover() + if val.(string) != "foo" { + t.Error("incorrect panic") + } + }() + if err := breaker.Run(alwaysPanics); err != nil { + t.Error(err) + } + t.Error("shouldn't get here") + }() + } + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } +} + 
+func TestBreakerStateTransitions(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + } + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // one success works, but is not enough to fully close + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } + // error works, but re-opens immediately + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + // breaker is open + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // two successes is enough to close it for good + for i := 0; i < 2; i++ { + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } + } + // error works + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + // breaker is still closed + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } +} + +func TestBreakerAsyncStateTransitions(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + } + + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Go(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // one success works, but is not enough to fully close + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } + // error works, but re-opens immediately + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + // 
just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // breaker is open + if err := breaker.Go(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // two successes is enough to close it for good + for i := 0; i < 2; i++ { + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } + } + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // error works + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // breaker is still closed + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } +} + +func ExampleBreaker() { + breaker := New(3, 1, 5*time.Second) + + for { + result := breaker.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! 
+ case ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } + } +} diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.gitignore b/Godeps/_workspace/src/github.com/eapache/queue/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml b/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml new file mode 100644 index 000000000..235a40a49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false + +go: + - 1.2 + - 1.3 + - 1.4 diff --git a/Godeps/_workspace/src/github.com/eapache/queue/LICENSE b/Godeps/_workspace/src/github.com/eapache/queue/LICENSE new file mode 100644 index 000000000..d5f36dbca --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/eapache/queue/README.md b/Godeps/_workspace/src/github.com/eapache/queue/README.md new file mode 100644 index 000000000..8e782335c --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. 
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/queue.go b/Godeps/_workspace/src/github.com/eapache/queue/queue.go new file mode 100644 index 000000000..2dc8d9395 --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/queue.go @@ -0,0 +1,88 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. +type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. +func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count*2) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + q.tail = (q.tail + 1) % len(q.buf) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. 
If the index is +// invalid, the call will panic. +func (q *Queue) Get(i int) interface{} { + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + return q.buf[(q.head+i)%len(q.buf)] +} + +// Remove removes the element from the front of the queue. If you actually +// want the element, call Peek first. This call panics if the queue is empty. +func (q *Queue) Remove() { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + q.buf[q.head] = nil + q.head = (q.head + 1) % len(q.buf) + q.count-- + if len(q.buf) > minQueueLen && q.count*4 == len(q.buf) { + q.resize() + } +} diff --git a/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go b/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go new file mode 100644 index 000000000..f2765c14d --- /dev/null +++ b/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go @@ -0,0 +1,162 @@ +package queue + +import "testing" + +func TestQueueSimple(t *testing.T) { + q := New() + + for i := 0; i < minQueueLen; i++ { + q.Add(i) + } + for i := 0; i < minQueueLen; i++ { + if q.Peek().(int) != i { + t.Error("peek", i, "had value", q.Peek()) + } + q.Remove() + } +} + +func TestQueueWrapping(t *testing.T) { + q := New() + + for i := 0; i < minQueueLen; i++ { + q.Add(i) + } + for i := 0; i < 3; i++ { + q.Remove() + q.Add(minQueueLen + i) + } + + for i := 0; i < minQueueLen; i++ { + if q.Peek().(int) != i+3 { + t.Error("peek", i, "had value", q.Peek()) + } + q.Remove() + } +} + +func TestQueueLength(t *testing.T) { + q := New() + + if q.Length() != 0 { + t.Error("empty queue length not 0") + } + + for i := 0; i < 1000; i++ { + q.Add(i) + if q.Length() != i+1 { + t.Error("adding: queue with", i, "elements has length", q.Length()) + } + } + for i := 0; i < 1000; i++ { + q.Remove() + if q.Length() != 1000-i-1 { + t.Error("removing: queue with", 1000-i-i, "elements has length", q.Length()) + } + } +} + +func TestQueueGet(t *testing.T) { + q := New() + + for i := 0; i < 
1000; i++ { + q.Add(i) + for j := 0; j < q.Length(); j++ { + if q.Get(j).(int) != j { + t.Errorf("index %d doesn't contain %d", j, j) + } + } + } +} + +func TestQueueGetOutOfRangePanics(t *testing.T) { + q := New() + + q.Add(1) + q.Add(2) + q.Add(3) + + assertPanics(t, "should panic when negative index", func() { + q.Get(-1) + }) + + assertPanics(t, "should panic when index greater than length", func() { + q.Get(4) + }) +} + +func TestQueuePeekOutOfRangePanics(t *testing.T) { + q := New() + + assertPanics(t, "should panic when peeking empty queue", func() { + q.Peek() + }) + + q.Add(1) + q.Remove() + + assertPanics(t, "should panic when peeking emptied queue", func() { + q.Peek() + }) +} + +func TestQueueRemoveOutOfRangePanics(t *testing.T) { + q := New() + + assertPanics(t, "should panic when removing empty queue", func() { + q.Remove() + }) + + q.Add(1) + q.Remove() + + assertPanics(t, "should panic when removing emptied queue", func() { + q.Remove() + }) +} + +func assertPanics(t *testing.T, name string, f func()) { + defer func() { + if r := recover(); r == nil { + t.Errorf("%s: didn't panic as expected", name) + } + }() + + f() +} + +// General warning: Go's benchmark utility (go test -bench .) increases the number of +// iterations until the benchmarks take a reasonable amount of time to run; memory usage +// is *NOT* considered. On my machine, these benchmarks hit around ~1GB before they've had +// enough, but if you have less than that available and start swapping, then all bets are off. 
+ +func BenchmarkQueueSerial(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(nil) + } + for i := 0; i < b.N; i++ { + q.Peek() + q.Remove() + } +} + +func BenchmarkQueueGet(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + q.Get(i) + } +} + +func BenchmarkQueueTickTock(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(nil) + q.Peek() + q.Remove() + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml new file mode 100644 index 000000000..3926838ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml @@ -0,0 +1,11 @@ +language: go +sudo: false +go: + - 1.3.1 + - 1.4 + - tip +env: + - GOARCH=amd64 + - GOARCH=386 +script: + - make test diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS new file mode 100644 index 000000000..2febb1f03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS @@ -0,0 +1,91 @@ +# This is the official list of go-dockerclient authors for copyright purposes. 
+ +Adam Bell-Hanssen +Aldrin Leal +Andreas Jaekle +Andrews Medina +Artem Sidorenko +Andy Goldstein +Ben Marini +Ben McCann +Brendan Fosberry +Brian Lalor +Brian Palmer +Burke Libbey +Carlos Diaz-Padron +Cezar Sa Espinola +Cheah Chu Yeow +cheneydeng +CMGS +Craig Jellick +Dan Williams +Daniel, Dao Quang Minh +Daniel Garcia +Darren Shepherd +Dave Choi +David Huie +Dawn Chen +Dinesh Subhraveti +Ed +Eric Anderson +Ewout Prangsma +Fabio Rehm +Fatih Arslan +Flavia Missi +Francisco Souza +Guillermo Ãlvarez Fernández +He Simei +Ivan Mikushin +James Bardin +Jari Kolehmainen +Jason Wilder +Jawher Moussa +Jean-Baptiste Dalido +Jeff Mitchell +Jeffrey Hulten +Johan Euphrosine +Kamil Domanski +Karan Misra +Kim, Hirokuni +Kyle Allan +Liron Levin +Liu Peng +Lucas Clemente +Lucas Weiblen +Mantas Matelis +Martin Sweeney +Máximo Cuadros Ortiz +Michal Fojtik +Mike Dillon +Mrunal Patel +Nick Ethier +Omeid Matten +Orivej Desh +Paul Bellamy +Paul Morie +Paul Weil +Peter Edge +Peter Jihoon Kim +Philippe LafoucrieÌ€re +Rafe Colton +Rob Miller +Robert Williamson +Salvador Gironès +Sam Rijs +Simon Eskildsen +Simon Menke +Skolos +Soulou +Sridhar Ratnakumar +Summer Mousa +Sunjin Lee +Tarsis Azevedo +Tim Schindler +Tobi Knaup +Tonic +ttyh061 +Victor Marmol +Vincenzo Prignano +Wiliam Souza +Ye Yin +Yuriy Bogdanov diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE new file mode 100644 index 000000000..706634474 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE @@ -0,0 +1,6 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +You can find the Docker license at the following link: +https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE new file mode 100644 index 
000000000..4e11de100 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2015, go-dockerclient authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile new file mode 100644 index 000000000..b8c2c99b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile @@ -0,0 +1,47 @@ +.PHONY: \ + all \ + vendor \ + lint \ + vet \ + fmt \ + fmtcheck \ + pretest \ + test \ + cov \ + clean + +SRCS = $(shell git ls-files '*.go' | grep -v '^external/') +PKGS = ./. 
./testing + +all: test + +vendor: + @ go get -v github.com/mjibson/party + party -d external -c -u + +lint: + @ go get -v github.com/golang/lint/golint + $(foreach file,$(SRCS),golint $(file) || exit;) + +vet: + @-go get -v golang.org/x/tools/cmd/vet + $(foreach pkg,$(PKGS),go vet $(pkg);) + +fmt: + gofmt -w $(SRCS) + +fmtcheck: + $(foreach file,$(SRCS),gofmt $(file) | diff -u $(file) - || exit;) + +pretest: lint vet fmtcheck + +test: pretest + $(foreach pkg,$(PKGS),go test $(pkg) || exit;) + +cov: + @ go get -v github.com/axw/gocov/gocov + @ go get golang.org/x/tools/cmd/cover + gocov test | gocov report + +clean: + $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown new file mode 100644 index 000000000..a124d0b45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown @@ -0,0 +1,106 @@ +# go-dockerclient + +[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest) +[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) +[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) + +This package presents a client for the Docker remote API. It also provides +support for the extensions in the [Swarm API](https://docs.docker.com/swarm/API/). + +This package also provides support for docker's network API, which is a simple +passthrough to the libnetwork remote API. Note that docker's network API is +only available in docker 1.8 and above, and only enabled in docker if +DOCKER_EXPERIMENTAL is defined during the docker build process. + +For more details, check the [remote API documentation](http://docs.docker.com/en/latest/reference/api/docker_remote_api/). 
+ +## Vendoring + +If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, +please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient +is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) +for details. + +## Example + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "unix:///var/run/docker.sock" + client, _ := docker.NewClient(endpoint) + imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) + for _, img := range imgs { + fmt.Println("ID: ", img.ID) + fmt.Println("RepoTags: ", img.RepoTags) + fmt.Println("Created: ", img.Created) + fmt.Println("Size: ", img.Size) + fmt.Println("VirtualSize: ", img.VirtualSize) + fmt.Println("ParentId: ", img.ParentID) + } +} +``` + +## Using with TLS + +In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "tcp://[ip]:[port]" + path := os.Getenv("DOCKER_CERT_PATH") + ca := fmt.Sprintf("%s/ca.pem", path) + cert := fmt.Sprintf("%s/cert.pem", path) + key := fmt.Sprintf("%s/key.pem", path) + client, _ := docker.NewTLSClient(endpoint, cert, key, ca) + // use client +} +``` + +If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables +`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. + + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + client, _ := docker.NewClientFromEnv() + // use client +} +``` + +See the documentation for more details. 
+ +## Developing + +All development commands can be seen in the [Makefile](Makefile). + +Commited code must pass: + +* [golint](https://github.com/golang/lint) +* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) +* [gofmt](https://golang.org/cmd/gofmt) +* [go test](https://golang.org/cmd/go/#hdr-Test_packages) + +Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go new file mode 100644 index 000000000..fccd55740 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go @@ -0,0 +1,133 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "strings" +) + +var AuthParseError error = errors.New("Failed to read authentication from dockercfg") + +// AuthConfiguration represents authentication options to use in the PushImage +// method. It represents the authentication in the Docker index server. +type AuthConfiguration struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +// AuthConfigurations represents authentication options to use for the +// PushImage method accommodating the new X-Registry-Config header +type AuthConfigurations struct { + Configs map[string]AuthConfiguration `json:"configs"` +} + +// dockerConfig represents a registry authentation configuration from the +// .dockercfg file. 
+type dockerConfig struct { + Auth string `json:"auth"` + Email string `json:"email"` +} + +// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the +// ~/.dockercfg file. +func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { + var r io.Reader + var err error + p := path.Join(os.Getenv("HOME"), ".docker", "config.json") + r, err = os.Open(p) + if err != nil { + p := path.Join(os.Getenv("HOME"), ".dockercfg") + r, err = os.Open(p) + if err != nil { + return nil, err + } + } + return NewAuthConfigurations(r) +} + +// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the +// same format as the .dockercfg file. +func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { + var auth *AuthConfigurations + confs, err := parseDockerConfig(r) + if err != nil { + return nil, err + } + auth, err = authConfigs(confs) + if err != nil { + return nil, err + } + return auth, nil +} + +func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { + buf := new(bytes.Buffer) + buf.ReadFrom(r) + byteData := buf.Bytes() + + var confsWrapper map[string]map[string]dockerConfig + if err := json.Unmarshal(byteData, &confsWrapper); err == nil { + if confs, ok := confsWrapper["auths"]; ok { + return confs, nil + } + } + + var confs map[string]dockerConfig + if err := json.Unmarshal(byteData, &confs); err != nil { + return nil, err + } + return confs, nil +} + +// authConfigs converts a dockerConfigs map to a AuthConfigurations object. 
+func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { + c := &AuthConfigurations{ + Configs: make(map[string]AuthConfiguration), + } + for reg, conf := range confs { + data, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + return nil, err + } + userpass := strings.Split(string(data), ":") + if len(userpass) != 2 { + return nil, AuthParseError + } + c.Configs[reg] = AuthConfiguration{ + Email: conf.Email, + Username: userpass[0], + Password: userpass[1], + ServerAddress: reg, + } + } + return c, nil +} + +// AuthCheck validates the given credentials. It returns nil if successful. +// +// See https://goo.gl/vPoEfJ for more details. +func (c *Client) AuthCheck(conf *AuthConfiguration) error { + if conf == nil { + return fmt.Errorf("conf is nil") + } + body, statusCode, err := c.do("POST", "/auth", doOptions{data: conf}) + if err != nil { + return err + } + if statusCode > 400 { + return fmt.Errorf("auth error (%d): %s", statusCode, body) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go new file mode 100644 index 000000000..fc0ffab84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go @@ -0,0 +1,91 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "encoding/base64" + "fmt" + "net/http" + "strings" + "testing" +) + +func TestAuthLegacyConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) + read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != nil { + t.Error(err) + } + c, ok := ac.Configs["docker.io"] + if !ok { + t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") + } + if got, want := c.Email, "user@example.com"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Username, "user"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Password, "pass"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.ServerAddress, "docker.io"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) + } +} + +func TestAuthBadConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("userpass")) + read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != AuthParseError { + t.Errorf("Incorrect error returned %v\n", err) + } + if ac != nil { + t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac) + } +} + +func TestAuthConfig(t *testing.T) { + auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) + read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth)) + ac, err := NewAuthConfigurations(read) + if err != nil { + t.Error(err) + } + c, ok := ac.Configs["docker.io"] + if !ok { + t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") + } + if got, want := c.Email, "user@example.com"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Username, "user"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.Password, "pass"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) + } + if got, want := c.ServerAddress, "docker.io"; got != want { + t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) + } +} + +func TestAuthCheck(t *testing.T) { + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + if err := client.AuthCheck(nil); err == nil { + t.Fatalf("expected error on nil auth config") + } + // test good auth + if err := client.AuthCheck(&AuthConfiguration{}); err != nil { + t.Fatal(err) + } + *fakeRT = FakeRoundTripper{status: http.StatusUnauthorized} + if err := client.AuthCheck(&AuthConfiguration{}); err == nil { + t.Fatal("expected failure from unauthorized auth") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go new file mode 100644 index 000000000..a4864db83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go @@ -0,0 +1,144 @@ +package docker + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "os" + "reflect" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" +) + +func TestBuildImageMultipleContextsError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + InputStream: &buf, + OutputStream: &buf, + ContextDir: "testing/data", + } + err := client.BuildImage(opts) + if err != ErrMultipleContexts { + t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error") + } +} + +func TestBuildImageContextDirDockerignoreParsing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + OutputStream: &buf, + ContextDir: 
"testing/data", + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + reqBody := fakeRT.requests[0].Body + tmpdir, err := unpackBodyTarball(reqBody) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := os.RemoveAll(tmpdir); err != nil { + t.Fatal(err) + } + }() + + files, err := ioutil.ReadDir(tmpdir) + if err != nil { + t.Fatal(err) + } + + foundFiles := []string{} + for _, file := range files { + foundFiles = append(foundFiles, file.Name()) + } + + expectedFiles := []string{ + ".dockerignore", + "Dockerfile", + "barfile", + "ca.pem", + "cert.pem", + "key.pem", + "server.pem", + "serverkey.pem", + "symlink", + } + + if !reflect.DeepEqual(expectedFiles, foundFiles) { + t.Errorf( + "BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v", + expectedFiles, foundFiles, + ) + } +} + +func TestBuildImageSendXRegistryConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + OutputStream: &buf, + ContextDir: "testing/data", + AuthConfigs: AuthConfigurations{ + Configs: map[string]AuthConfiguration{ + "quay.io": { + Username: "foo", + Password: "bar", + Email: "baz", + ServerAddress: "quay.io", + }, + }, + }, + } + + encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg==" + + if err := client.BuildImage(opts); err != nil { + t.Fatal(err) + } + + xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0] + if xRegistryConfig != encodedConfig { + t.Errorf( + "BuildImage: X-Registry-Config not set currectly: expected %q, got %q", + encodedConfig, + xRegistryConfig, + ) + } +} + +func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) { + tmpdir, err = 
ioutil.TempDir("", "go-dockerclient-test") + if err != nil { + return + } + err = archive.Untar(req, tmpdir, &archive.TarOptions{ + Compression: archive.Uncompressed, + NoLchown: true, + }) + return +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go new file mode 100644 index 000000000..e7b056c3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go @@ -0,0 +1,43 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "fmt" + +// ChangeType is a type for constants indicating the type of change +// in a container +type ChangeType int + +const ( + // ChangeModify is the ChangeType for container modifications + ChangeModify ChangeType = iota + + // ChangeAdd is the ChangeType for additions to a container + ChangeAdd + + // ChangeDelete is the ChangeType for deletions from a container + ChangeDelete +) + +// Change represents a change in a container. +// +// See http://goo.gl/QkW9sH for more details. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go new file mode 100644 index 000000000..7c2ec30f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "testing" +) + +func TestChangeString(t *testing.T) { + var tests = []struct { + change Change + expected string + }{ + {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, + {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, + {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, + {Change{"/etc/passwd", 33}, " /etc/passwd"}, + } + for _, tt := range tests { + if got := tt.change.String(); got != tt.expected { + t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go new file mode 100644 index 000000000..986bbb3d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go @@ -0,0 +1,835 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package docker provides a client for the Docker remote API. +// +// See http://goo.gl/G3plxW for more details on the remote API. +package docker + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" +) + +const userAgent = "go-dockerclient" + +var ( + // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. + ErrInvalidEndpoint = errors.New("invalid endpoint") + + // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. 
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") + + apiVersion112, _ = NewAPIVersion("1.12") +) + +// APIVersion is an internal representation of a version of the Remote API. +type APIVersion []int + +// NewAPIVersion returns an instance of APIVersion for the given string. +// +// The given string must be in the form .., where , +// and are integer numbers. +func NewAPIVersion(input string) (APIVersion, error) { + if !strings.Contains(input, ".") { + return nil, fmt.Errorf("Unable to parse version %q", input) + } + arr := strings.Split(input, ".") + ret := make(APIVersion, len(arr)) + var err error + for i, val := range arr { + ret[i], err = strconv.Atoi(val) + if err != nil { + return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) + } + } + return ret, nil +} + +func (version APIVersion) String() string { + var str string + for i, val := range version { + str += strconv.Itoa(val) + if i < len(version)-1 { + str += "." + } + } + return str +} + +// LessThan is a function for comparing APIVersion structs +func (version APIVersion) LessThan(other APIVersion) bool { + return version.compare(other) < 0 +} + +// LessThanOrEqualTo is a function for comparing APIVersion structs +func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { + return version.compare(other) <= 0 +} + +// GreaterThan is a function for comparing APIVersion structs +func (version APIVersion) GreaterThan(other APIVersion) bool { + return version.compare(other) > 0 +} + +// GreaterThanOrEqualTo is a function for comparing APIVersion structs +func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { + return version.compare(other) >= 0 +} + +func (version APIVersion) compare(other APIVersion) int { + for i, v := range version { + if i <= len(other)-1 { + otherVersion := other[i] + + if v < otherVersion { + return -1 + } else if v > otherVersion { + return 1 + } + } + } + if len(version) > len(other) { + return 1 + } + 
if len(version) < len(other) { + return -1 + } + return 0 +} + +// Client is the basic type of this package. It provides methods for +// interaction with the API. +type Client struct { + SkipServerVersionCheck bool + HTTPClient *http.Client + TLSConfig *tls.Config + + endpoint string + endpointURL *url.URL + eventMonitor *eventMonitoringState + requestedAPIVersion APIVersion + serverAPIVersion APIVersion + expectedAPIVersion APIVersion +} + +// NewClient returns a Client instance ready for communication with the given +// server endpoint. It will use the latest remote API version available in the +// server. +func NewClient(endpoint string) (*Client, error) { + client, err := NewVersionedClient(endpoint, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewTLSClient returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates . It will use the latest remote API version +// available in the server. +func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { + client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates (passed inline to the function as opposed to being +// read from a local file). It will use the latest remote API version available in the server. 
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { + client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewVersionedClient returns a Client instance ready for communication with +// the given server endpoint, using a specific remote API version. +func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { + u, err := parseEndpoint(endpoint, false) + if err != nil { + return nil, err + } + var requestedAPIVersion APIVersion + if strings.Contains(apiVersionString, ".") { + requestedAPIVersion, err = NewAPIVersion(apiVersionString) + if err != nil { + return nil, err + } + } + return &Client{ + HTTPClient: http.DefaultClient, + endpoint: endpoint, + endpointURL: u, + eventMonitor: new(eventMonitoringState), + requestedAPIVersion: requestedAPIVersion, + }, nil +} + +// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. +func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { + return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) +} + +// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates, using a specific remote API version. 
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { + certPEMBlock, err := ioutil.ReadFile(cert) + if err != nil { + return nil, err + } + keyPEMBlock, err := ioutil.ReadFile(key) + if err != nil { + return nil, err + } + caPEMCert, err := ioutil.ReadFile(ca) + if err != nil { + return nil, err + } + return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) +} + +// NewClientFromEnv returns a Client instance ready for communication created from +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. +// +// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. +// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. +func NewClientFromEnv() (*Client, error) { + client, err := NewVersionedClientFromEnv("") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, +// and using a specific remote API version. +// +// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. +// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { + dockerEnv, err := getDockerEnv() + if err != nil { + return nil, err + } + dockerHost := dockerEnv.dockerHost + if dockerEnv.dockerTLSVerify { + parts := strings.SplitN(dockerHost, "://", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) + } + dockerHost = fmt.Sprintf("https://%s", parts[1]) + cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") + key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") + ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") + return NewVersionedTLSClient(dockerHost, cert, key, ca, apiVersionString) + } + return NewVersionedClient(dockerHost, apiVersionString) +} + +// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens +// server endpoint, key and certificates (passed inline to the function as opposed to being +// read from a local file), using a specific remote API version. 
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) { + u, err := parseEndpoint(endpoint, true) + if err != nil { + return nil, err + } + var requestedAPIVersion APIVersion + if strings.Contains(apiVersionString, ".") { + requestedAPIVersion, err = NewAPIVersion(apiVersionString) + if err != nil { + return nil, err + } + } + if certPEMBlock == nil || keyPEMBlock == nil { + return nil, errors.New("Both cert and key are required") + } + tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} + if caPEMCert == nil { + tlsConfig.InsecureSkipVerify = true + } else { + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(caPEMCert) { + return nil, errors.New("Could not add RootCA pem") + } + tlsConfig.RootCAs = caPool + } + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + if err != nil { + return nil, err + } + return &Client{ + HTTPClient: &http.Client{Transport: tr}, + TLSConfig: tlsConfig, + endpoint: endpoint, + endpointURL: u, + eventMonitor: new(eventMonitoringState), + requestedAPIVersion: requestedAPIVersion, + }, nil +} + +func (c *Client) checkAPIVersion() error { + serverAPIVersionString, err := c.getServerAPIVersionString() + if err != nil { + return err + } + c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString) + if err != nil { + return err + } + if c.requestedAPIVersion == nil { + c.expectedAPIVersion = c.serverAPIVersion + } else { + c.expectedAPIVersion = c.requestedAPIVersion + } + return nil +} + +// Ping pings the docker server +// +// See http://goo.gl/stJENm for more details. 
+func (c *Client) Ping() error { + path := "/_ping" + body, status, err := c.do("GET", path, doOptions{}) + if err != nil { + return err + } + if status != http.StatusOK { + return newError(status, body) + } + return nil +} + +func (c *Client) getServerAPIVersionString() (version string, err error) { + body, status, err := c.do("GET", "/version", doOptions{}) + if err != nil { + return "", err + } + if status != http.StatusOK { + return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status) + } + var versionResponse map[string]interface{} + err = json.Unmarshal(body, &versionResponse) + if err != nil { + return "", err + } + if version, ok := (versionResponse["ApiVersion"]).(string); ok { + return version, nil + } + return "", nil +} + +type doOptions struct { + data interface{} + forceJSON bool +} + +func (c *Client) do(method, path string, doOptions doOptions) ([]byte, int, error) { + var params io.Reader + if doOptions.data != nil || doOptions.forceJSON { + buf, err := json.Marshal(doOptions.data) + if err != nil { + return nil, -1, err + } + params = bytes.NewBuffer(buf) + } + if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return nil, -1, err + } + } + req, err := http.NewRequest(method, c.getURL(path), params) + if err != nil { + return nil, -1, err + } + req.Header.Set("User-Agent", userAgent) + if doOptions.data != nil { + req.Header.Set("Content-Type", "application/json") + } else if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + var resp *http.Response + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol == "unix" { + var dial net.Conn + dial, err = net.Dial(protocol, address) + if err != nil { + return nil, -1, err + } + defer dial.Close() + breader := bufio.NewReader(dial) + err = req.Write(dial) + if err != nil { + return nil, -1, err + } + resp, err = 
http.ReadResponse(breader, req) + } else { + resp, err = c.HTTPClient.Do(req) + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, resp.StatusCode, newError(resp.StatusCode, body) + } + return body, resp.StatusCode, nil +} + +type streamOptions struct { + setRawTerminal bool + rawJSONStream bool + useJSONDecoder bool + headers map[string]string + in io.Reader + stdout io.Writer + stderr io.Writer + // timeout is the inital connection timeout + timeout time.Duration +} + +func (c *Client) stream(method, path string, streamOptions streamOptions) error { + if (method == "POST" || method == "PUT") && streamOptions.in == nil { + streamOptions.in = bytes.NewReader(nil) + } + if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return err + } + } + req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) + if err != nil { + return err + } + req.Header.Set("User-Agent", userAgent) + if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + for key, val := range streamOptions.headers { + req.Header.Set(key, val) + } + var resp *http.Response + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if streamOptions.stdout == nil { + streamOptions.stdout = ioutil.Discard + } + if streamOptions.stderr == nil { + streamOptions.stderr = ioutil.Discard + } + if protocol == "unix" { + dial, err := net.Dial(protocol, address) + if err != nil { + return err + } + defer dial.Close() + breader := bufio.NewReader(dial) + err = req.Write(dial) + if err != nil { + return err + } + + // ReadResponse may hang if server does not replay + if streamOptions.timeout > 0 { + 
dial.SetDeadline(time.Now().Add(streamOptions.timeout)) + } + + if resp, err = http.ReadResponse(breader, req); err != nil { + // Cancel timeout for future I/O operations + if streamOptions.timeout > 0 { + dial.SetDeadline(time.Time{}) + } + if strings.Contains(err.Error(), "connection refused") { + return ErrConnectionRefused + } + return err + } + } else { + if resp, err = c.HTTPClient.Do(req); err != nil { + if strings.Contains(err.Error(), "connection refused") { + return ErrConnectionRefused + } + return err + } + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return newError(resp.StatusCode, body) + } + if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" { + // if we want to get raw json stream, just copy it back to output + // without decoding it + if streamOptions.rawJSONStream { + _, err = io.Copy(streamOptions.stdout, resp.Body) + return err + } + dec := json.NewDecoder(resp.Body) + for { + var m jsonMessage + if err := dec.Decode(&m); err == io.EOF { + break + } else if err != nil { + return err + } + if m.Stream != "" { + fmt.Fprint(streamOptions.stdout, m.Stream) + } else if m.Progress != "" { + fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress) + } else if m.Error != "" { + return errors.New(m.Error) + } + if m.Status != "" { + fmt.Fprintln(streamOptions.stdout, m.Status) + } + } + } else { + if streamOptions.setRawTerminal { + _, err = io.Copy(streamOptions.stdout, resp.Body) + } else { + _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) + } + return err + } + return nil +} + +type hijackOptions struct { + success chan struct{} + setRawTerminal bool + in io.Reader + stdout io.Writer + stderr io.Writer + data interface{} +} + +func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error { + if path != "/version" && 
!c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return err + } + } + + var params io.Reader + if hijackOptions.data != nil { + buf, err := json.Marshal(hijackOptions.data) + if err != nil { + return err + } + params = bytes.NewBuffer(buf) + } + + if hijackOptions.stdout == nil { + hijackOptions.stdout = ioutil.Discard + } + if hijackOptions.stderr == nil { + hijackOptions.stderr = ioutil.Discard + } + req, err := http.NewRequest(method, c.getURL(path), params) + if err != nil { + return err + } + req.Header.Set("Content-Type", "plain/text") + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + if c.TLSConfig != nil && protocol != "unix" { + dial, err = tlsDial(protocol, address, c.TLSConfig) + if err != nil { + return err + } + } else { + dial, err = net.Dial(protocol, address) + if err != nil { + return err + } + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + clientconn.Do(req) + if hijackOptions.success != nil { + hijackOptions.success <- struct{}{} + <-hijackOptions.success + } + rwc, br := clientconn.Hijack() + defer rwc.Close() + errChanOut := make(chan error, 1) + errChanIn := make(chan error, 1) + exit := make(chan bool) + go func() { + defer close(exit) + defer close(errChanOut) + var err error + if hijackOptions.setRawTerminal { + // When TTY is ON, use regular copy + _, err = io.Copy(hijackOptions.stdout, br) + } else { + _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) + } + errChanOut <- err + }() + go func() { + if hijackOptions.in != nil { + _, err := io.Copy(rwc, hijackOptions.in) + errChanIn <- err + } else { + errChanIn <- nil + } + rwc.(interface { + CloseWrite() error + }).CloseWrite() + }() + <-exit + errIn := <-errChanIn + errOut := <-errChanOut + if errIn != nil { + return errIn + } + return errOut +} + +func (c 
*Client) getURL(path string) string { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == "unix" { + urlStr = "" + } + + if c.requestedAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) + } + return fmt.Sprintf("%s%s", urlStr, path) +} + +type jsonMessage struct { + Status string `json:"status,omitempty"` + Progress string `json:"progress,omitempty"` + Error string `json:"error,omitempty"` + Stream string `json:"stream,omitempty"` +} + +func queryString(opts interface{}) string { + if opts == nil { + return "" + } + value := reflect.ValueOf(opts) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + if value.Kind() != reflect.Struct { + return "" + } + items := url.Values(map[string][]string{}) + for i := 0; i < value.NumField(); i++ { + field := value.Type().Field(i) + if field.PkgPath != "" { + continue + } + key := field.Tag.Get("qs") + if key == "" { + key = strings.ToLower(field.Name) + } else if key == "-" { + continue + } + addQueryStringValue(items, key, value.Field(i)) + } + return items.Encode() +} + +func addQueryStringValue(items url.Values, key string, v reflect.Value) { + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + items.Add(key, "1") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.Int() > 0 { + items.Add(key, strconv.FormatInt(v.Int(), 10)) + } + case reflect.Float32, reflect.Float64: + if v.Float() > 0 { + items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + } + case reflect.String: + if v.String() != "" { + items.Add(key, v.String()) + } + case reflect.Ptr: + if !v.IsNil() { + if b, err := json.Marshal(v.Interface()); err == nil { + items.Add(key, string(b)) + } + } + case reflect.Map: + if len(v.MapKeys()) > 0 { + if b, err := json.Marshal(v.Interface()); err == nil { + items.Add(key, string(b)) + } + } + case reflect.Array, reflect.Slice: + vLen := v.Len() + if vLen > 0 { + for i := 0; i < vLen; i++ 
{ + addQueryStringValue(items, key, v.Index(i)) + } + } + } +} + +// Error represents failures in the API. It represents a failure from the API. +type Error struct { + Status int + Message string +} + +func newError(status int, body []byte) *Error { + return &Error{Status: status, Message: string(body)} +} + +func (e *Error) Error() string { + return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) +} + +func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, ErrInvalidEndpoint + } + if tls { + u.Scheme = "https" + } + switch u.Scheme { + case "unix": + return u, nil + case "http", "https", "tcp": + _, port, err := net.SplitHostPort(u.Host) + if err != nil { + if e, ok := err.(*net.AddrError); ok { + if e.Err == "missing port in address" { + return u, nil + } + } + return nil, ErrInvalidEndpoint + } + number, err := strconv.ParseInt(port, 10, 64) + if err == nil && number > 0 && number < 65536 { + if u.Scheme == "tcp" { + if number == 2376 { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + } + return u, nil + } + return nil, ErrInvalidEndpoint + default: + return nil, ErrInvalidEndpoint + } +} + +type dockerEnv struct { + dockerHost string + dockerTLSVerify bool + dockerCertPath string +} + +func getDockerEnv() (*dockerEnv, error) { + dockerHost := os.Getenv("DOCKER_HOST") + var err error + if dockerHost == "" { + dockerHost, err = getDefaultDockerHost() + if err != nil { + return nil, err + } + } + dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" + var dockerCertPath string + if dockerTLSVerify { + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + if dockerCertPath == "" { + home := homedir.Get() + if home == "" { + return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") + } + dockerCertPath = filepath.Join(home, ".docker") + dockerCertPath, err = filepath.Abs(dockerCertPath) + if err != nil { + return nil, err + } + } + } + return 
&dockerEnv{ + dockerHost: dockerHost, + dockerTLSVerify: dockerTLSVerify, + dockerCertPath: dockerCertPath, + }, nil +} + +func getDefaultDockerHost() (string, error) { + var defaultHost string + if runtime.GOOS != "windows" { + // If we do not have a host, default to unix socket + defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) + } else { + // If we do not have a host, default to TCP socket on Windows + defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) + } + return opts.ValidateHost(defaultHost) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go new file mode 100644 index 000000000..c00c3d30c --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go @@ -0,0 +1,422 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +func TestNewAPIClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if client.HTTPClient != http.DefaultClient { + t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient) + } + // test unix socket endpoints + endpoint = "unix:///var/run/docker.sock" + client, err = NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. 
Got %s.", endpoint, client.endpoint) + } + if !client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be true, got false") + } + if client.requestedAPIVersion != nil { + t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) + } +} + +func newTLSClient(endpoint string) (*Client, error) { + return NewTLSClient(endpoint, + "testing/data/cert.pem", + "testing/data/key.pem", + "testing/data/ca.pem") +} + +func TestNewTSLAPIClient(t *testing.T) { + endpoint := "https://localhost:4243" + client, err := newTLSClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if !client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be true, got false") + } + if client.requestedAPIVersion != nil { + t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) + } +} + +func TestNewVersionedClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewVersionedClient(endpoint, "1.12") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if client.HTTPClient != http.DefaultClient { + t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { + t.Errorf("Wrong requestAPIVersion. Want %q. 
Got %q.", "1.12", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewTLSVersionedClient(t *testing.T) { + certPath := "testing/data/cert.pem" + keyPath := "testing/data/key.pem" + caPath := "testing/data/ca.pem" + endpoint := "https://localhost:4243" + client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { + t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewTLSVersionedClientInvalidCA(t *testing.T) { + certPath := "testing/data/cert.pem" + keyPath := "testing/data/key.pem" + caPath := "testing/data/key.pem" + endpoint := "https://localhost:4243" + _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") + if err == nil { + t.Errorf("Expected invalid ca at %s", caPath) + } +} + +func TestNewClientInvalidEndpoint(t *testing.T) { + cases := []string{ + "htp://localhost:3243", "http://localhost:a", "localhost:8080", + "", "localhost", "http://localhost:8080:8383", "http://localhost:65536", + "https://localhost:-20", + } + for _, c := range cases { + client, err := NewClient(c) + if client != nil { + t.Errorf("Want client for invalid endpoint, got %#v.", client) + } + if !reflect.DeepEqual(err, ErrInvalidEndpoint) { + t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. 
Got %#v.", c, ErrInvalidEndpoint, err) + } + } +} + +func TestNewTLSClient(t *testing.T) { + var tests = []struct { + endpoint string + expected string + }{ + {"tcp://localhost:2376", "https"}, + {"tcp://localhost:2375", "https"}, + {"tcp://localhost:4000", "https"}, + {"http://localhost:4000", "https"}, + } + + for _, tt := range tests { + client, err := newTLSClient(tt.endpoint) + if err != nil { + t.Error(err) + } + got := client.endpointURL.Scheme + if got != tt.expected { + t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected) + } + } +} + +func TestGetURL(t *testing.T) { + var tests = []struct { + endpoint string + path string + expected string + }{ + {"http://localhost:4243/", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"http://localhost:4243/////", "/", "http://localhost:4243/"}, + {"unix:///var/run/docker.socket", "/containers", "/containers"}, + } + for _, tt := range tests { + client, _ := NewClient(tt.endpoint) + client.endpoint = tt.endpoint + client.SkipServerVersionCheck = true + got := client.getURL(tt.path) + if got != tt.expected { + t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) + } + } +} + +func TestError(t *testing.T) { + err := newError(400, []byte("bad parameter")) + expected := Error{Status: 400, Message: "bad parameter"} + if !reflect.DeepEqual(expected, *err) { + t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) + } + message := "API error (400): bad parameter" + if err.Error() != message { + t.Errorf("Wrong error message. Want %q. 
Got %q.", message, err.Error()) + } +} + +func TestQueryString(t *testing.T) { + v := float32(2.4) + f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) + jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) + var tests = []struct { + input interface{} + want string + }{ + {&ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{Before: "something"}, "before=something"}, + {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, + {ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"}, + {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, + {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, + {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{T: 10, Y: 10.35000}, "y=10.35"}, + {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, + {nil, ""}, + {10, ""}, + {"not_a_struct", ""}, + } + for _, tt := range tests { + got := queryString(tt.input) + if got != tt.want { + t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got) + } + } +} + +func TestNewAPIVersionFailures(t *testing.T) { + var tests = []struct { + input string + expectedError string + }{ + {"1-0", `Unable to parse version "1-0"`}, + {"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`}, + } + for _, tt := range tests { + v, err := NewAPIVersion(tt.input) + if v != nil { + t.Errorf("Expected version, got %v.", v) + } + if err.Error() != tt.expectedError { + t.Errorf("NewAPIVersion(%q): wrong error. Want %q. 
Got %q", tt.input, tt.expectedError, err.Error()) + } + } +} + +func TestAPIVersions(t *testing.T) { + var tests = []struct { + a string + b string + expectedALessThanB bool + expectedALessThanOrEqualToB bool + expectedAGreaterThanB bool + expectedAGreaterThanOrEqualToB bool + }{ + {"1.11", "1.11", false, true, false, true}, + {"1.10", "1.11", true, true, false, false}, + {"1.11", "1.10", false, false, true, true}, + + {"1.9", "1.11", true, true, false, false}, + {"1.11", "1.9", false, false, true, true}, + + {"1.1.1", "1.1", false, false, true, true}, + {"1.1", "1.1.1", true, true, false, false}, + + {"2.1", "1.1.1", false, false, true, true}, + {"2.1", "1.3.1", false, false, true, true}, + {"1.1.1", "2.1", true, true, false, false}, + {"1.3.1", "2.1", true, true, false, false}, + } + + for _, tt := range tests { + a, _ := NewAPIVersion(tt.a) + b, _ := NewAPIVersion(tt.b) + + if tt.expectedALessThanB && !a.LessThan(b) { + t.Errorf("Expected %#v < %#v", a, b) + } + if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { + t.Errorf("Expected %#v <= %#v", a, b) + } + if tt.expectedAGreaterThanB && !a.GreaterThan(b) { + t.Errorf("Expected %#v > %#v", a, b) + } + if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { + t.Errorf("Expected %#v >= %#v", a, b) + } + } +} + +func TestPing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + err := client.Ping() + if err != nil { + t.Fatal(err) + } +} + +func TestPingFailing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (500): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +func TestPingFailingWrongStatus(t *testing.T) { + fakeRT := &FakeRoundTripper{message: 
"", status: http.StatusAccepted} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (202): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +func TestPingErrorWithUnixSocket(t *testing.T) { + go func() { + li, err := net.Listen("unix", "/tmp/echo.sock") + if err != nil { + t.Fatal(err) + } + defer li.Close() + if err != nil { + t.Fatalf("Expected to get listner, but failed: %#v", err) + } + + fd, err := li.Accept() + if err != nil { + t.Fatalf("Expected to accept connection, but failed: %#v", err) + } + + buf := make([]byte, 512) + nr, err := fd.Read(buf) + + // Create invalid response message to occur error + data := buf[0:nr] + for i := 0; i < 10; i++ { + data[i] = 63 + } + + _, err = fd.Write(data) + if err != nil { + t.Fatalf("Expected to write to socket, but failed: %#v", err) + } + + return + }() + + // Wait for unix socket to listen + time.Sleep(10 * time.Millisecond) + + endpoint := "unix:///tmp/echo.sock" + u, _ := parseEndpoint(endpoint, false) + client := Client{ + HTTPClient: http.DefaultClient, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + } + + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } +} + +type FakeRoundTripper struct { + message string + status int + header map[string]string + requests []*http.Request +} + +func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + body := strings.NewReader(rt.message) + rt.requests = append(rt.requests, r) + res := &http.Response{ + StatusCode: rt.status, + Body: ioutil.NopCloser(body), + Header: make(http.Header), + } + for k, v := range rt.header { + res.Header.Set(k, v) + } + return res, nil +} + +func (rt *FakeRoundTripper) Reset() { + rt.requests = nil +} + +type person struct { + Name string + Age int `json:"age"` +} + +type dumb struct 
{ + T int `qs:"-"` + v int + W float32 + X int + Y float64 + Z int `qs:"zee"` + Person *person `qs:"p"` +} + +type fakeEndpointURL struct { + Scheme string +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go new file mode 100644 index 000000000..89430975b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go @@ -0,0 +1,1058 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// ErrContainerAlreadyExists is the error returned by CreateContainer when the +// container already exists. +var ErrContainerAlreadyExists = errors.New("container already exists") + +// ListContainersOptions specify parameters to the ListContainers function. +// +// See http://goo.gl/6Y4Gz7 for more details. +type ListContainersOptions struct { + All bool + Size bool + Limit int + Since string + Before string + Filters map[string][]string +} + +// APIPort is a type that represents a port mapping returned by the Docker API +type APIPort struct { + PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"` + PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty"` + IP string `json:"IP,omitempty" yaml:"IP,omitempty"` +} + +// APIContainers represents a container. +// +// See http://goo.gl/QeFH7U for more details. 
+type APIContainers struct { + ID string `json:"Id" yaml:"Id"` + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Command string `json:"Command,omitempty" yaml:"Command,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + Status string `json:"Status,omitempty" yaml:"Status,omitempty"` + Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` + SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` + Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` +} + +// ListContainers returns a slice of containers matching the given criteria. +// +// See http://goo.gl/6Y4Gz7 for more details. +func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { + path := "/containers/json?" + queryString(opts) + body, _, err := c.do("GET", path, doOptions{}) + if err != nil { + return nil, err + } + var containers []APIContainers + err = json.Unmarshal(body, &containers) + if err != nil { + return nil, err + } + return containers, nil +} + +// Port represents the port number and the protocol, in the form +// /. For example: 80/tcp. +type Port string + +// Port returns the number of the port. +func (p Port) Port() string { + return strings.Split(string(p), "/")[0] +} + +// Proto returns the name of the protocol. +func (p Port) Proto() string { + parts := strings.Split(string(p), "/") + if len(parts) == 1 { + return "tcp" + } + return parts[1] +} + +// State represents the state of a container. 
+type State struct { + Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` + Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"` + Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"` + OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"` + Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"` + ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` + Error string `json:"Error,omitempty" yaml:"Error,omitempty"` + StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"` + FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"` +} + +// String returns the string representation of a state. +func (s *State) String() string { + if s.Running { + if s.Paused { + return "paused" + } + return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt)) + } + return fmt.Sprintf("Exit %d", s.ExitCode) +} + +// PortBinding represents the host/container port mapping as returned in the +// `docker inspect` json +type PortBinding struct { + HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"` + HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"` +} + +// PortMapping represents a deprecated field in the `docker inspect` output, +// and its value as found in NetworkSettings should always be nil +type PortMapping map[string]string + +// NetworkSettings contains network-related information about a container +type NetworkSettings struct { + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"` + Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"` + PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"` + Ports map[Port][]PortBinding `json:"Ports,omitempty" 
yaml:"Ports,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"` + SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"` + LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"` + LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"` + SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"` + SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"` +} + +// PortMappingAPI translates the port mappings as contained in NetworkSettings +// into the format in which they would appear when returned by the API +func (settings *NetworkSettings) PortMappingAPI() []APIPort { + var mapping []APIPort + for port, bindings := range settings.Ports { + p, _ := parsePort(port.Port()) + if len(bindings) == 0 { + mapping = append(mapping, APIPort{ + PublicPort: int64(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + p, _ := parsePort(port.Port()) + h, _ := parsePort(binding.HostPort) + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + PublicPort: int64(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + return mapping +} + +func parsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// Config is the list of configuration options used when creating a container. 
+// Config does not contain the options that are specific to starting a container on a +// given host. Those are contained in HostConfig +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` + 
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// LogConfig defines the log driver type and the configuration for it. +type LogConfig struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty"` + Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"` +} + +// ULimit defines system-wide resource limitations +// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users. +type ULimit struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty"` + Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"` + Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"` +} + +// SwarmNode containers information about which Swarm node the container is on +type SwarmNode struct { + ID string `json:"ID,omitempty" yaml:"ID,omitempty"` + IP string `json:"IP,omitempty" yaml:"IP,omitempty"` + Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"` + Name string `json:"Name,omitempty" yaml:"Name,omitempty"` + CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Container is the type encompasing everything about a container - its config, +// hostconfig, etc. 
+type Container struct { + ID string `json:"Id" yaml:"Id"` + + Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` + + Path string `json:"Path,omitempty" yaml:"Path,omitempty"` + Args []string `json:"Args,omitempty" yaml:"Args,omitempty"` + + Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` + State State `json:"State,omitempty" yaml:"State,omitempty"` + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + + Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"` + + NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"` + + SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"` + ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"` + HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"` + HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"` + LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"` + Name string `json:"Name,omitempty" yaml:"Name,omitempty"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` + + Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"` + HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` + ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"` + + RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"` + + AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"` +} + +// RenameContainerOptions specify parameters to the RenameContainer function. +// +// See http://goo.gl/L00hoj for more details. 
+type RenameContainerOptions struct { + // ID of container to rename + ID string `qs:"-"` + + // New name + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} + +// RenameContainer updates and existing containers name +// +// See http://goo.gl/L00hoj for more details. +func (c *Client) RenameContainer(opts RenameContainerOptions) error { + _, _, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{}) + return err +} + +// InspectContainer returns information about a container by its ID. +// +// See http://goo.gl/CxVuJ5 for more details. +func (c *Client) InspectContainer(id string) (*Container, error) { + path := "/containers/" + id + "/json" + body, status, err := c.do("GET", path, doOptions{}) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + if err != nil { + return nil, err + } + var container Container + err = json.Unmarshal(body, &container) + if err != nil { + return nil, err + } + return &container, nil +} + +// ContainerChanges returns changes in the filesystem of the given container. +// +// See http://goo.gl/QkW9sH for more details. +func (c *Client) ContainerChanges(id string) ([]Change, error) { + path := "/containers/" + id + "/changes" + body, status, err := c.do("GET", path, doOptions{}) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + if err != nil { + return nil, err + } + var changes []Change + err = json.Unmarshal(body, &changes) + if err != nil { + return nil, err + } + return changes, nil +} + +// CreateContainerOptions specify parameters to the CreateContainer function. +// +// See http://goo.gl/2xxQQK for more details. +type CreateContainerOptions struct { + Name string + Config *Config `qs:"-"` + HostConfig *HostConfig `qs:"-"` +} + +// CreateContainer creates a new container, returning the container instance, +// or an error in case of failure. +// +// See http://goo.gl/mErxNp for more details. 
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { + path := "/containers/create?" + queryString(opts) + body, status, err := c.do( + "POST", + path, + doOptions{ + data: struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` + }{ + opts.Config, + opts.HostConfig, + }, + }, + ) + + if status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if status == http.StatusConflict { + return nil, ErrContainerAlreadyExists + } + if err != nil { + return nil, err + } + var container Container + err = json.Unmarshal(body, &container) + if err != nil { + return nil, err + } + + container.Name = opts.Name + + return &container, nil +} + +// KeyValuePair is a type for generic key/value pairs as used in the Lxc +// configuration +type KeyValuePair struct { + Key string `json:"Key,omitempty" yaml:"Key,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty"` +} + +// RestartPolicy represents the policy for automatically restarting a container. +// +// Possible values are: +// +// - always: the docker daemon will always restart the container +// - on-failure: the docker daemon will restart the container on failures, at +// most MaximumRetryCount times +// - no: the docker daemon will not restart the container automatically +type RestartPolicy struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty"` + MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"` +} + +// AlwaysRestart returns a restart policy that tells the Docker daemon to +// always restart the container. +func AlwaysRestart() RestartPolicy { + return RestartPolicy{Name: "always"} +} + +// RestartOnFailure returns a restart policy that tells the Docker daemon to +// restart the container on failures, trying at most maxRetry times. 
+func RestartOnFailure(maxRetry int) RestartPolicy { + return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} +} + +// NeverRestart returns a restart policy that tells the Docker daemon to never +// restart the container on failures. +func NeverRestart() RestartPolicy { + return RestartPolicy{Name: "no"} +} + +// Device represents a device mapping between the Docker host and the +// container. +type Device struct { + PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"` + PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"` + CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"` +} + +// HostConfig contains the container options related to starting a container on +// a given host +type HostConfig struct { + Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` + CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` + CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` + ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` + LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` + PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` + PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only + DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` + ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` + VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` + IpcMode string 
`json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` + PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` + UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` + RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` + Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` + LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` + ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` + CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` + CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"` + Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` +} + +// StartContainer starts a container, returning an error in case of failure. +// +// See http://goo.gl/iM5GYs for more details. +func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { + path := "/containers/" + id + "/start" + _, status, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id, Err: err} + } + if status == http.StatusNotModified { + return &ContainerAlreadyRunning{ID: id} + } + if err != nil { + return err + } + return nil +} + +// StopContainer stops a container, killing it after the given timeout (in +// seconds). +// +// See http://goo.gl/EbcpXt for more details. 
+func (c *Client) StopContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) + _, status, err := c.do("POST", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if status == http.StatusNotModified { + return &ContainerNotRunning{ID: id} + } + if err != nil { + return err + } + return nil +} + +// RestartContainer stops a container, killing it after the given timeout (in +// seconds), during the stop process. +// +// See http://goo.gl/VOzR2n for more details. +func (c *Client) RestartContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) + _, status, err := c.do("POST", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// PauseContainer pauses the given container. +// +// See http://goo.gl/AM5t42 for more details. +func (c *Client) PauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/pause", id) + _, status, err := c.do("POST", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// UnpauseContainer unpauses the given container. +// +// See http://goo.gl/eBrNSL for more details. +func (c *Client) UnpauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/unpause", id) + _, status, err := c.do("POST", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// TopResult represents the list of processes running in a container, as +// returned by /containers//top. +// +// See http://goo.gl/qu4gse for more details. +type TopResult struct { + Titles []string + Processes [][]string +} + +// TopContainer returns processes running inside a container +// +// See http://goo.gl/qu4gse for more details. 
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { + var args string + var result TopResult + if psArgs != "" { + args = fmt.Sprintf("?ps_args=%s", psArgs) + } + path := fmt.Sprintf("/containers/%s/top%s", id, args) + body, status, err := c.do("GET", path, doOptions{}) + if status == http.StatusNotFound { + return result, &NoSuchContainer{ID: id} + } + if err != nil { + return result, err + } + err = json.Unmarshal(body, &result) + if err != nil { + return result, err + } + return result, nil +} + +// Stats represents container statistics, returned by /containers//stats. +// +// See http://goo.gl/DFMiYD for more details. +type Stats struct { + Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` + Network struct { + RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"` + RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"` + TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"` + TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"` + RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"` + TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"` + } `json:"network,omitempty" yaml:"network,omitempty"` + MemoryStats struct { + Stats struct { + TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"` + Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"` + MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"` + TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"` + Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"` + Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"` + TotalMappedFile uint64 `json:"total_mapped_file,omitempty" 
yaml:"total_mapped_file,omitempty"` + Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"` + Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"` + Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"` + TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"` + Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"` + TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"` + TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"` + TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"` + TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"` + RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"` + HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"` + TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"` + TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"` + ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"` + TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"` + TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"` + TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"` + InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"` + ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"` + Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` + InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` + TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` + } `json:"stats,omitempty" yaml:"stats,omitempty"` + MaxUsage uint64 
`json:"max_usage,omitempty" yaml:"max_usage,omitempty"` + Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` + Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"` + } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"` + BlkioStats struct { + IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"` + IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"` + IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"` + IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"` + IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"` + IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"` + IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"` + } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` +} + +// CPUStats is a stats entry for cpu stats +type CPUStats struct { + CPUUsage struct { + PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"` + UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"` + TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"` + UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"` + } `json:"cpu_usage,omitempty" 
yaml:"cpu_usage,omitempty"` + SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"` + ThrottlingData struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + ThrottledTime uint64 `json:"throttled_time,omitempty"` + } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"` +} + +// BlkioStatsEntry is a stats entry for blkio_stats +type BlkioStatsEntry struct { + Major uint64 `json:"major,omitempty" yaml:"major,omitempty"` + Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"` + Op string `json:"op,omitempty" yaml:"op,omitempty"` + Value uint64 `json:"value,omitempty" yaml:"value,omitempty"` +} + +// StatsOptions specify parameters to the Stats function. +// +// See http://goo.gl/DFMiYD for more details. +type StatsOptions struct { + ID string + Stats chan<- *Stats + Stream bool + // A flag that enables stopping the stats operation + Done <-chan bool + // Initial connection timeout + Timeout time.Duration +} + +// Stats sends container statistics for the given container to the given channel. +// +// This function is blocking, similar to a streaming call for logs, and should be run +// on a separate goroutine from the caller. Note that this function will block until +// the given container is removed, not just exited. When finished, this function +// will close the given channel. Alternatively, function can be stopped by signaling on the Done channel +// +// See http://goo.gl/DFMiYD for more details. 
+func (c *Client) Stats(opts StatsOptions) (retErr error) { + errC := make(chan error, 1) + readCloser, writeCloser := io.Pipe() + + defer func() { + close(opts.Stats) + + select { + case err := <-errC: + if err != nil && retErr == nil { + retErr = err + } + default: + // No errors + } + + if err := readCloser.Close(); err != nil && retErr == nil { + retErr = err + } + }() + + go func() { + err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ + rawJSONStream: true, + useJSONDecoder: true, + stdout: writeCloser, + timeout: opts.Timeout, + }) + if err != nil { + dockerError, ok := err.(*Error) + if ok { + if dockerError.Status == http.StatusNotFound { + err = &NoSuchContainer{ID: opts.ID} + } + } + } + if closeErr := writeCloser.Close(); closeErr != nil && err == nil { + err = closeErr + } + errC <- err + close(errC) + }() + + quit := make(chan struct{}) + defer close(quit) + go func() { + // block here waiting for the signal to stop function + select { + case <-opts.Done: + readCloser.Close() + case <-quit: + return + } + }() + + decoder := json.NewDecoder(readCloser) + stats := new(Stats) + for err := decoder.Decode(&stats); err != io.EOF; err = decoder.Decode(stats) { + if err != nil { + return err + } + opts.Stats <- stats + stats = new(Stats) + } + return nil +} + +// KillContainerOptions represents the set of options that can be used in a +// call to KillContainer. +// +// See http://goo.gl/TFkECx for more details. +type KillContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // The signal to send to the container. When omitted, Docker server + // will assume SIGKILL. + Signal Signal +} + +// KillContainer kills a container, returning an error in case of failure. +// +// See http://goo.gl/TFkECx for more details. +func (c *Client) KillContainer(opts KillContainerOptions) error { + path := "/containers/" + opts.ID + "/kill" + "?" 
+ queryString(opts) + _, status, err := c.do("POST", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.ID} + } + if err != nil { + return err + } + return nil +} + +// RemoveContainerOptions encapsulates options to remove a container. +// +// See http://goo.gl/ZB83ji for more details. +type RemoveContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // A flag that indicates whether Docker should remove the volumes + // associated to the container. + RemoveVolumes bool `qs:"v"` + + // A flag that indicates whether Docker should remove the container + // even if it is currently running. + Force bool +} + +// RemoveContainer removes a container, returning an error in case of failure. +// +// See http://goo.gl/ZB83ji for more details. +func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { + path := "/containers/" + opts.ID + "?" + queryString(opts) + _, status, err := c.do("DELETE", path, doOptions{}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.ID} + } + if err != nil { + return err + } + return nil +} + +// CopyFromContainerOptions is the set of options that can be used when copying +// files or folders from a container. +// +// See http://goo.gl/rINMlw for more details. +type CopyFromContainerOptions struct { + OutputStream io.Writer `json:"-"` + Container string `json:"-"` + Resource string +} + +// CopyFromContainer copy files or folders from a container, using a given +// resource. +// +// See http://goo.gl/rINMlw for more details. 
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + url := fmt.Sprintf("/containers/%s/copy", opts.Container) + body, status, err := c.do("POST", url, doOptions{data: opts}) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return err + } + _, err = io.Copy(opts.OutputStream, bytes.NewBuffer(body)) + return err +} + +// WaitContainer blocks until the given container stops, return the exit code +// of the container status. +// +// See http://goo.gl/J88DHU for more details. +func (c *Client) WaitContainer(id string) (int, error) { + body, status, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) + if status == http.StatusNotFound { + return 0, &NoSuchContainer{ID: id} + } + if err != nil { + return 0, err + } + var r struct{ StatusCode int } + err = json.Unmarshal(body, &r) + if err != nil { + return 0, err + } + return r.StatusCode, nil +} + +// CommitContainerOptions aggregates parameters to the CommitContainer method. +// +// See http://goo.gl/Jn8pe8 for more details. +type CommitContainerOptions struct { + Container string + Repository string `qs:"repo"` + Tag string + Message string `qs:"m"` + Author string + Run *Config `qs:"-"` +} + +// CommitContainer creates a new image from a container's changes. +// +// See http://goo.gl/Jn8pe8 for more details. +func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { + path := "/commit?" + queryString(opts) + body, status, err := c.do("POST", path, doOptions{data: opts.Run}) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return nil, err + } + var image Image + err = json.Unmarshal(body, &image) + if err != nil { + return nil, err + } + return &image, nil +} + +// AttachToContainerOptions is the set of options that can be used when +// attaching to a container. 
+// +// See http://goo.gl/RRAhws for more details. +type AttachToContainerOptions struct { + Container string `qs:"-"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // Get container logs, sending it to OutputStream. + Logs bool + + // Stream the response? + Stream bool + + // Attach to stdin, and use InputStream. + Stdin bool + + // Attach to stdout, and use OutputStream. + Stdout bool + + // Attach to stderr, and use ErrorStream. + Stderr bool + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// AttachToContainer attaches to a container, using the given options. +// +// See http://goo.gl/RRAhws for more details. +func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + path := "/containers/" + opts.Container + "/attach?" + queryString(opts) + return c.hijack("POST", path, hijackOptions{ + success: opts.Success, + setRawTerminal: opts.RawTerminal, + in: opts.InputStream, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + }) +} + +// LogsOptions represents the set of options used when getting logs from a +// container. +// +// See http://goo.gl/rLhKSU for more details. +type LogsOptions struct { + Container string `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + Follow bool + Stdout bool + Stderr bool + Since int64 + Timestamps bool + Tail string + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// Logs gets stdout and stderr logs from the specified container. 
+// +// See http://goo.gl/rLhKSU for more details. +func (c *Client) Logs(opts LogsOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + if opts.Tail == "" { + opts.Tail = "all" + } + path := "/containers/" + opts.Container + "/logs?" + queryString(opts) + return c.stream("GET", path, streamOptions{ + setRawTerminal: opts.RawTerminal, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + }) +} + +// ResizeContainerTTY resizes the terminal to the given height and width. +func (c *Client) ResizeContainerTTY(id string, height, width int) error { + params := make(url.Values) + params.Set("h", strconv.Itoa(height)) + params.Set("w", strconv.Itoa(width)) + _, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) + return err +} + +// ExportContainerOptions is the set of parameters to the ExportContainer +// method. +// +// See http://goo.gl/hnzE62 for more details. +type ExportContainerOptions struct { + ID string + OutputStream io.Writer +} + +// ExportContainer export the contents of container id as tar archive +// and prints the exported contents to stdout. +// +// See http://goo.gl/hnzE62 for more details. +func (c *Client) ExportContainer(opts ExportContainerOptions) error { + if opts.ID == "" { + return &NoSuchContainer{ID: opts.ID} + } + url := fmt.Sprintf("/containers/%s/export", opts.ID) + return c.stream("GET", url, streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + }) +} + +// NoSuchContainer is the error returned when a given container does not exist. +type NoSuchContainer struct { + ID string + Err error +} + +func (err *NoSuchContainer) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such container: " + err.ID +} + +// ContainerAlreadyRunning is the error returned when a given container is +// already running. 
+type ContainerAlreadyRunning struct { + ID string +} + +func (err *ContainerAlreadyRunning) Error() string { + return "Container already running: " + err.ID +} + +// ContainerNotRunning is the error returned when a given container is not +// running. +type ContainerNotRunning struct { + ID string +} + +func (err *ContainerNotRunning) Error() string { + return "Container not running: " + err.ID +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go new file mode 100644 index 000000000..00966aa19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go @@ -0,0 +1,1941 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func TestStateString(t *testing.T) { + started := time.Now().Add(-3 * time.Hour) + var tests = []struct { + input State + expected string + }{ + {State{Running: true, Paused: true}, "^paused$"}, + {State{Running: true, StartedAt: started}, "^Up 3h.*$"}, + {State{Running: false, ExitCode: 7}, "^Exit 7$"}, + } + for _, tt := range tests { + re := regexp.MustCompile(tt.expected) + if got := tt.input.String(); !re.MatchString(got) { + t.Errorf("State.String(): wrong result. Want %q. 
Got %q.", tt.expected, got) + } + } +} + +func TestListContainers(t *testing.T) { + jsonContainers := `[ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], + "Created": 1367854152, + "Status": "Exit 0" + } +]` + var expected []APIContainers + err := json.Unmarshal([]byte(jsonContainers), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) + containers, err := client.ListContainers(ListContainersOptions{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(containers, expected) { + t.Errorf("ListContainers: Expected %#v. 
Got %#v.", expected, containers) + } +} + +func TestListContainersParams(t *testing.T) { + var tests = []struct { + input ListContainersOptions + params map[string][]string + }{ + {ListContainersOptions{}, map[string][]string{}}, + {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, + {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, + { + ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, + map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, + }, + { + ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, + map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}}, + }, + { + ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}}, + map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}}, + }, + } + fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/containers/json")) + for _, tt := range tests { + if _, err := client.ListContainers(tt.input); err != nil { + t.Error(err) + } + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "GET" { + t.Errorf("Wrong HTTP method. Want GET. 
Got %s.", meth) + } + fakeRT.Reset() + } +} + +func TestListContainersFailure(t *testing.T) { + var tests = []struct { + status int + message string + }{ + {400, "bad parameter"}, + {500, "internal server error"}, + } + for _, tt := range tests { + client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) + expected := Error{Status: tt.status, Message: tt.message} + containers, err := client.ListContainers(ListContainersOptions{}) + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) + } + if len(containers) > 0 { + t.Errorf("ListContainers failure. Expected empty list. Got %#v.", containers) + } + } +} + +func TestInspectContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "AppArmorProfile": "Profile", + "Created": "2013-05-07T14:51:42.087658+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 17179869184, + "MemorySwap": 34359738368, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "SecurityOpt": [ + "label:user:USER" + ], + "Ulimits": [ + { "Name": "nofile", "Soft": 1024, "Hard": 2048 } + ] + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Node": { + "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", + "IP": "192.168.99.105", + "Addra": "192.168.99.105:2376", + "Name": "node-01", + "Cpus": 4, + "Memory": 1048436736, + "Labels": { + "executiondriver": "native-0.2", + "kernelversion": "3.18.5-tinycore64", + "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", + "provider": 
"virtualbox", + "storagedriver": "aufs" + } + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "CgroupParent": "/mesos", + "Memory": 17179869184, + "MemorySwap": 34359738368 + } +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*container, expected) { + t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestInspectContainerNegativeSwap(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.087658+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 17179869184, + "MemorySwap": -1, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "base", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*container, expected) { + t.Errorf("InspectContainer(%q): Expected %#v. 
Got %#v.", id, expected, container) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestInspectContainerFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) + expected := Error{Status: 500, Message: "server error"} + container, err := client.InspectContainer("abe033") + if container != nil { + t.Errorf("InspectContainer: Expected container, got %#v", container) + } + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestInspectContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) + container, err := client.InspectContainer("abe033") + if container != nil { + t.Errorf("InspectContainer: Expected container, got %#v", container) + } + expected := &NoSuchContainer{ID: "abe033"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestContainerChanges(t *testing.T) { + jsonChanges := `[ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } +]` + var expected []Change + err := json.Unmarshal([]byte(jsonChanges), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + changes, err := client.ContainerChanges(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(changes, expected) { + t.Errorf("ContainerChanges(%q): Expected %#v. 
Got %#v.", id, expected, changes) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestContainerChangesFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) + expected := Error{Status: 500, Message: "server error"} + changes, err := client.ContainerChanges("abe033") + if changes != nil { + t.Errorf("ContainerChanges: Expected changes, got %#v", changes) + } + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestContainerChangesNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) + changes, err := client.ContainerChanges("abe033") + if changes != nil { + t.Errorf("ContainerChanges: Expected changes, got %#v", changes) + } + expected := &NoSuchContainer{ID: "abe033"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("ContainerChanges: Wrong error information. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestCreateContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Warnings": [] +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{AttachStdout: true, AttachStdin: true} + opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} + container, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + if container.ID != id { + t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/create")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("CreateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } + var gotBody Config + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateContainerImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound}) + config := Config{AttachStdout: true, AttachStdin: true} + container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) + if container != nil { + t.Errorf("CreateContainer: expected container, got %#v.", container) + } + if !reflect.DeepEqual(err, ErrNoSuchImage) { + t.Errorf("CreateContainer: Wrong error type. Want %#v. 
Got %#v.", ErrNoSuchImage, err) + } +} + +func TestCreateContainerDuplicateName(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict}) + config := Config{AttachStdout: true, AttachStdin: true} + container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) + if container != nil { + t.Errorf("CreateContainer: expected container, got %#v.", container) + } + if err != ErrContainerAlreadyExists { + t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err) + } +} + +func TestCreateContainerWithHostConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{} + hostConfig := HostConfig{PublishAllPorts: true} + opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig} + _, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + var gotBody map[string]interface{} + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } + if _, ok := gotBody["HostConfig"]; !ok { + t.Errorf("CreateContainer: wrong body. HostConfig was not serialized") + } +} + +func TestStartContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StartContainer(id, &HostConfig{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } +} + +func TestStartContainerNilHostConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StartContainer(id, nil) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } + var buf [4]byte + req.Body.Read(buf[:]) + if string(buf[:]) != "null" { + t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:]) + } +} + +func TestStartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StartContainer("a2344", &HostConfig{}) + expected := &NoSuchContainer{ID: "a2344", Err: err.(*NoSuchContainer).Err} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StartContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestStartContainerAlreadyRunning(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified}) + err := client.StartContainer("a2334", &HostConfig{}) + expected := &ContainerAlreadyRunning{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestStopContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StopContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestStopContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StopContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestStopContainerNotRunning(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified}) + err := client.StopContainer("a2334", 10) + expected := &ContainerNotRunning{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestRestartContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.RestartContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRestartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RestartContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestPauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.PauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.PauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestUnpauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.UnpauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.UnpauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestKillContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestKillContainerSignal(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + if signal := req.URL.Query().Get("signal"); signal != "15" { + t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) + } +} + +func TestKillContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.KillContainer(KillContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRemoveContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "DELETE" { + t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "DELETE", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRemoveContainerRemoveVolumes(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + params := map[string][]string(req.URL.Query()) + expected := map[string][]string{"v": {"1"}} + if !reflect.DeepEqual(params, expected) { + t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. Got %#v.", id, expected, params) + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestResizeContainerTTY(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.ResizeContainerTTY(id, 40, 80) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } + got := map[string][]string(req.URL.Query()) + expectedParams := map[string][]string{ + "w": {"80"}, + "h": {"40"}, + } + if !reflect.DeepEqual(got, expectedParams) { + t.Errorf("Expected %#v, got %#v.", expectedParams, got) + } +} + +func TestWaitContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + status, err := client.WaitContainer(id) + if err != nil { + t.Fatal(err) + } + if status != 56 { + t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.WaitContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestCommitContainer(t *testing.T) { + response := `{"Id":"596069db4bf5"}` + client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) + id := "596069db4bf5" + image, err := client.CommitContainer(CommitContainerOptions{}) + if err != nil { + t.Fatal(err) + } + if image.ID != id { + t.Errorf("CommitContainer: Wrong image id. Want %q. 
Got %q.", id, image.ID) + } +} + +func TestCommitContainerParams(t *testing.T) { + cfg := Config{Memory: 67108864} + json, _ := json.Marshal(&cfg) + var tests = []struct { + input CommitContainerOptions + params map[string][]string + body []byte + }{ + {CommitContainerOptions{}, map[string][]string{}, nil}, + {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, + { + CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, + map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}}, + nil, + }, + { + CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, + map[string][]string{"container": {"44c004db4b17"}}, + json, + }, + } + fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/commit")) + for _, tt := range tests { + if _, err := client.CommitContainer(tt.input); err != nil { + t.Error(err) + } + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "POST" { + t.Errorf("Wrong HTTP method. Want POST. 
Got %s.", meth) + } + if tt.body != nil { + if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { + if bytes.Compare(requestBody, tt.body) != 0 { + t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) + } + } else { + t.Errorf("Error reading request body: %#v", err) + } + } + fakeRT.Reset() + } +} + +func TestCommitContainerFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) + _, err := client.CommitContainer(CommitContainerOptions{}) + if err == nil { + t.Error("Expected non-nil error, got .") + } +} + +func TestCommitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.CommitContainer(CommitContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestAttachToContainerLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &buf, + Stdout: true, + Stderr: true, + Logs: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "POST" { + t.Errorf("AttachToContainer: wrong HTTP method. Want POST. 
Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/attach")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "logs": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestAttachToContainer(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestAttachToContainerSentinel(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + success := make(chan struct{}) + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + Success: success, + } + go func() { + if err := client.AttachToContainer(opts); err != nil { + t.Error(err) + } + }() + success <- <-success +} + +func TestAttachToContainerNilStdout(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: nil, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestAttachToContainerNilStderr(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: 
&stdout, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestAttachToContainerRawTerminalFalse(t *testing.T) { + input := strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req = *r + w.WriteHeader(http.StatusOK) + hj, ok := w.(http.Hijacker) + if !ok { + t.Fatal("cannot hijack server connection") + } + conn, _, err := hj.Hijack() + if err != nil { + t.Fatal(err) + } + conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + conn.Write([]byte("hello")) + conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6}) + conn.Write([]byte("hello!")) + conn.Close() + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: input, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: false, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) + } + if stdout.String() != "hello" { + t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String()) + } + if stderr.String() != "hello!" { + t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. 
Got %q.", "hello!", stderr.String()) + } +} + +func TestAttachToContainerWithoutContainer(t *testing.T) { + var client Client + err := client.AttachToContainer(AttachToContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) + } +} + +func TestLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"all"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. 
Got %#v.", expectedQs, got) + } +} + +func TestLogsNilStdoutDoesntFail(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsNilStderrDoesntFail(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsSpecifyingTail(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. 
Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"100"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestLogsRawTerminal(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("something happened!")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + RawTerminal: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } +} + +func TestLogsNoContainer(t *testing.T) { + var client Client + err := client.Logs(LogsOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) + } +} + +func TestNoSuchContainerError(t *testing.T) { + var err = &NoSuchContainer{ID: "i345"} + expected := "No such container: i345" + if got := err.Error(); got != expected { + t.Errorf("NoSuchContainer: wrong message. Want %q. 
Got %q.", expected, got) + } +} + +func TestNoSuchContainerErrorMessage(t *testing.T) { + var err = &NoSuchContainer{ID: "i345", Err: errors.New("some advanced error info")} + expected := "some advanced error info" + if got := err.Error(); got != expected { + t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) + } +} + +func TestExportContainer(t *testing.T) { + content := "exported container tar content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func TestExportContainerViaUnixSocket(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip(fmt.Sprintf("skipping test on %s", runtime.GOOS)) + } + content := "exported container tar content" + var buf []byte + out := bytes.NewBuffer(buf) + tempSocket := tempfile("export_socket") + defer os.Remove(tempSocket) + endpoint := "unix://" + tempSocket + u, _ := parseEndpoint(endpoint, false) + client := Client{ + HTTPClient: http.DefaultClient, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + } + listening := make(chan string) + done := make(chan int) + go runStreamConnServer(t, "unix", tempSocket, listening, done) + <-listening // wait for server to start + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + <-done // make sure server stopped + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. 
Got %#v.", content, out.String()) + } +} + +func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) { + defer close(done) + l, err := net.Listen(network, laddr) + if err != nil { + t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) + listening <- "" + return + } + defer l.Close() + listening <- l.Addr().String() + c, err := l.Accept() + if err != nil { + t.Logf("Accept failed: %v", err) + return + } + c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) + c.Close() +} + +func tempfile(filename string) string { + return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid()) +} + +func TestExportContainerNoId(t *testing.T) { + client := Client{} + out := stdoutMock{bytes.NewBufferString("")} + err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) + e, ok := err.(*NoSuchContainer) + if !ok { + t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", e) + } + if e.ID != "" { + t.Errorf("ExportContainer: wrong ID. Want %q. Got %q", "", e.ID) + } +} + +func TestCopyFromContainer(t *testing.T) { + content := "File content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := CopyFromContainerOptions{ + Container: "a123456", + OutputStream: out, + } + err := client.CopyFromContainer(opts) + if err != nil { + t.Errorf("CopyFromContainer: caugh error %#v while copying from container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func TestCopyFromContainerEmptyContainer(t *testing.T) { + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + err := client.CopyFromContainer(CopyFromContainerOptions{}) + _, ok := err.(*NoSuchContainer) + if !ok { + t.Errorf("CopyFromContainer: invalid error returned. 
Want NoSuchContainer, got %#v.", err) + } +} + +func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Warnings": [] +}` + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{AttachStdout: true, AttachStdin: true} + opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} + container, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + if container.Name != "TestCreateContainer" { + t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) + } +} + +func TestAlwaysRestart(t *testing.T) { + policy := AlwaysRestart() + if policy.Name != "always" { + t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) + } + if policy.MaximumRetryCount != 0 { + t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) + } +} + +func TestRestartOnFailure(t *testing.T) { + const retry = 5 + policy := RestartOnFailure(retry) + if policy.Name != "on-failure" { + t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. Got %q", retry, "on-failure", policy.Name) + } + if policy.MaximumRetryCount != retry { + t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount) + } +} + +func TestNeverRestart(t *testing.T) { + policy := NeverRestart() + if policy.Name != "no" { + t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) + } + if policy.MaximumRetryCount != 0 { + t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. 
Got %d", policy.MaximumRetryCount) + } +} + +func TestTopContainer(t *testing.T) { + jsonTop := `{ + "Processes": [ + [ + "ubuntu", + "3087", + "815", + "0", + "01:44", + "?", + "00:00:00", + "cmd1" + ], + [ + "root", + "3158", + "3087", + "0", + "01:44", + "?", + "00:00:01", + "cmd2" + ] + ], + "Titles": [ + "UID", + "PID", + "PPID", + "C", + "STIME", + "TTY", + "TIME", + "CMD" + ] +}` + var expected TopResult + err := json.Unmarshal([]byte(jsonTop), &expected) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0" + fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK} + client := newTestClient(fakeRT) + processes, err := client.TopContainer(id, "") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(processes, expected) { + t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes) + } + if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 || + processes.Processes[0][7] != "cmd1" { + t.Errorf("TopContainer: Process list to include cmd1. Got %#v.", processes) + } + expectedURI := "/containers/" + id + "/top" + if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { + t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) + } +} + +func TestTopContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.TopContainer("abef348", "") + expected := &NoSuchContainer{ID: "abef348"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestTopContainerWithPsArgs(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound} + client := newTestClient(fakeRT) + expectedErr := &NoSuchContainer{ID: "abef348"} + if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) { + t.Errorf("TopContainer: Expected %v. 
Got %v.", expectedErr, err) + } + expectedURI := "/containers/abef348/top?ps_args=aux" + if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { + t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) + } +} + +func TestStatsTimeout(t *testing.T) { + + l, err := net.Listen("unix", "/tmp/docker_test.sock") + if err != nil { + t.Fatal(err) + } + received := false + defer l.Close() + go func() { + l.Accept() + received = true + time.Sleep(time.Millisecond * 250) + }() + client, _ := NewClient("unix:///tmp/docker_test.sock") + client.SkipServerVersionCheck = true + errC := make(chan error, 1) + statsC := make(chan *Stats) + done := make(chan bool) + go func() { + errC <- client.Stats(StatsOptions{"c", statsC, true, done, time.Millisecond * 100}) + close(errC) + }() + err = <-errC + e, ok := err.(net.Error) + if !ok || !e.Timeout() { + t.Error("Failed to receive timeout exception") + } + if !received { + t.Fatal("Failed to receive message") + } +} + +func TestStats(t *testing.T) { + jsonStats1 := `{ + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit": 189204833, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + 
"inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 428795731968 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 388177920 + } + ], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 25994442 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 1734 + } + ], + "io_queue_recursive": [], + "io_service_time_recursive": [], + "io_wait_time_recursive": [], + "io_merged_recursive": [], + "io_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + } + }` + // 1 second later, cache is 100 + jsonStats2 := `{ + "read" : "2015-01-08T22:57:32.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 100, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 
6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 428795731968 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 388177920 + } + ], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 25994442 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 1734 + } + ], + "io_queue_recursive": [], + "io_service_time_recursive": [], + "io_wait_time_recursive": [], + "io_merged_recursive": [], + "io_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000 + } + }` + var expected1 Stats + var expected2 Stats + err := json.Unmarshal([]byte(jsonStats1), &expected1) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(jsonStats2), &expected2) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0" + + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(jsonStats1)) + w.Write([]byte(jsonStats2)) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + errC := make(chan error, 
1) + statsC := make(chan *Stats) + done := make(chan bool) + go func() { + errC <- client.Stats(StatsOptions{id, statsC, true, done, 0}) + close(errC) + }() + var resultStats []*Stats + for { + stats, ok := <-statsC + if !ok { + break + } + resultStats = append(resultStats, stats) + } + err = <-errC + if err != nil { + t.Fatal(err) + } + if len(resultStats) != 2 { + t.Fatalf("Stats: Expected 2 results. Got %d.", len(resultStats)) + } + if !reflect.DeepEqual(resultStats[0], &expected1) { + t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0]) + } + if !reflect.DeepEqual(resultStats[1], &expected2) { + t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1]) + } + if req.Method != "GET" { + t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/" + id + "/stats")) + if req.URL.Path != u.Path { + t.Errorf("Stats: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } +} + +func TestStatsContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + statsC := make(chan *Stats) + done := make(chan bool) + err := client.Stats(StatsOptions{"abef348", statsC, true, done, 0}) + expected := &NoSuchContainer{ID: "abef348"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRenameContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := RenameContainerOptions{ID: "something_old", Name: "something_new"} + err := client.RenameContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("RenameContainer: wrong HTTP method. Want %q. 
Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/something_old/rename?name=something_new")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RenameContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } + expectedValues := expectedURL.Query()["name"] + actualValues := req.URL.Query()["name"] + if len(actualValues) != 1 || expectedValues[0] != actualValues[0] { + t.Errorf("RenameContainer: Wrong params in request. Want %q. Got %q.", expectedValues, actualValues) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go new file mode 100644 index 000000000..c54b0b0e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go @@ -0,0 +1,168 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package docker + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// Env represents a list of key-pair represented in the form KEY=VALUE. +type Env []string + +// Get returns the string value of the given key. +func (env *Env) Get(key string) (value string) { + return env.Map()[key] +} + +// Exists checks whether the given key is defined in the internal Env +// representation. +func (env *Env) Exists(key string) bool { + _, exists := env.Map()[key] + return exists +} + +// GetBool returns a boolean representation of the given key. The key is false +// whenever its value if 0, no, false, none or an empty string. Any other value +// will be interpreted as true. +func (env *Env) GetBool(key string) (value bool) { + s := strings.ToLower(strings.Trim(env.Get(key), " \t")) + if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { + return false + } + return true +} + +// SetBool defines a boolean value to the given key. 
+func (env *Env) SetBool(key string, value bool) { + if value { + env.Set(key, "1") + } else { + env.Set(key, "0") + } +} + +// GetInt returns the value of the provided key, converted to int. +// +// It the value cannot be represented as an integer, it returns -1. +func (env *Env) GetInt(key string) int { + return int(env.GetInt64(key)) +} + +// SetInt defines an integer value to the given key. +func (env *Env) SetInt(key string, value int) { + env.Set(key, strconv.Itoa(value)) +} + +// GetInt64 returns the value of the provided key, converted to int64. +// +// It the value cannot be represented as an integer, it returns -1. +func (env *Env) GetInt64(key string) int64 { + s := strings.Trim(env.Get(key), " \t") + val, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return -1 + } + return val +} + +// SetInt64 defines an integer (64-bit wide) value to the given key. +func (env *Env) SetInt64(key string, value int64) { + env.Set(key, strconv.FormatInt(value, 10)) +} + +// GetJSON unmarshals the value of the provided key in the provided iface. +// +// iface is a value that can be provided to the json.Unmarshal function. +func (env *Env) GetJSON(key string, iface interface{}) error { + sval := env.Get(key) + if sval == "" { + return nil + } + return json.Unmarshal([]byte(sval), iface) +} + +// SetJSON marshals the given value to JSON format and stores it using the +// provided key. +func (env *Env) SetJSON(key string, value interface{}) error { + sval, err := json.Marshal(value) + if err != nil { + return err + } + env.Set(key, string(sval)) + return nil +} + +// GetList returns a list of strings matching the provided key. It handles the +// list as a JSON representation of a list of strings. +// +// If the given key matches to a single string, it will return a list +// containing only the value that matches the key. 
+func (env *Env) GetList(key string) []string { + sval := env.Get(key) + if sval == "" { + return nil + } + var l []string + if err := json.Unmarshal([]byte(sval), &l); err != nil { + l = append(l, sval) + } + return l +} + +// SetList stores the given list in the provided key, after serializing it to +// JSON format. +func (env *Env) SetList(key string, value []string) error { + return env.SetJSON(key, value) +} + +// Set defines the value of a key to the given string. +func (env *Env) Set(key, value string) { + *env = append(*env, key+"="+value) +} + +// Decode decodes `src` as a json dictionary, and adds each decoded key-value +// pair to the environment. +// +// If `src` cannot be decoded as a json dictionary, an error is returned. +func (env *Env) Decode(src io.Reader) error { + m := make(map[string]interface{}) + if err := json.NewDecoder(src).Decode(&m); err != nil { + return err + } + for k, v := range m { + env.SetAuto(k, v) + } + return nil +} + +// SetAuto will try to define the Set* method to call based on the given value. +func (env *Env) SetAuto(key string, value interface{}) { + if fval, ok := value.(float64); ok { + env.SetInt64(key, int64(fval)) + } else if sval, ok := value.(string); ok { + env.Set(key, sval) + } else if val, err := json.Marshal(value); err == nil { + env.Set(key, string(val)) + } else { + env.Set(key, fmt.Sprintf("%v", value)) + } +} + +// Map returns the map representation of the env. 
+func (env *Env) Map() map[string]string { + if len(*env) == 0 { + return nil + } + m := make(map[string]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = parts[1] + } + return m +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go new file mode 100644 index 000000000..df5169d06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go @@ -0,0 +1,351 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package docker + +import ( + "bytes" + "errors" + "reflect" + "sort" + "testing" +) + +func TestGet(t *testing.T) { + var tests = []struct { + input []string + query string + expected string + }{ + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""}, + {[]string{"WAT="}, "WAT", ""}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Get(tt.query) + if got != tt.expected { + t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got) + } + } +} + +func TestExists(t *testing.T) { + var tests = []struct { + input []string + query string + expected bool + }{ + {[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true}, + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Exists(tt.query) + if got != tt.expected { + t.Errorf("Env.Exists(%q): wrong result. Want %v. 
Got %v", tt.query, tt.expected, got) + } + } +} + +func TestGetBool(t *testing.T) { + var tests = []struct { + input string + expected bool + }{ + {"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false}, + {"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true}, + {"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false}, + } + env := Env([]string{ + "EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false", + "NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin", + "ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t", + }) + for _, tt := range tests { + got := env.GetBool(tt.input) + if got != tt.expected { + t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got) + } + } +} + +func TestSetBool(t *testing.T) { + var tests = []struct { + input bool + expected string + }{ + {true, "1"}, {false, "0"}, + } + for _, tt := range tests { + var env Env + env.SetBool("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetInt(t *testing.T) { + var tests = []struct { + input string + expected int + }{ + {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, + } + env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) + for _, tt := range tests { + got := env.GetInt(tt.input) + if got != tt.expected { + t.Errorf("Env.GetInt(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) + } + } +} + +func TestSetInt(t *testing.T) { + var tests = []struct { + input int + expected string + }{ + {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, + {0, "0"}, {-34, "-34"}, + } + for _, tt := range tests { + var env Env + env.SetInt("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%d): wrong result. Want %q. 
Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetInt64(t *testing.T) { + var tests = []struct { + input string + expected int64 + }{ + {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, + } + env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) + for _, tt := range tests { + got := env.GetInt64(tt.input) + if got != tt.expected { + t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) + } + } +} + +func TestSetInt64(t *testing.T) { + var tests = []struct { + input int64 + expected string + }{ + {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, + {0, "0"}, {-34, "-34"}, + } + for _, tt := range tests { + var env Env + env.SetInt64("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestGetJSON(t *testing.T) { + var p struct { + Name string `json:"name"` + Age int `json:"age"` + } + var env Env + env.Set("person", `{"name":"Gopher","age":5}`) + err := env.GetJSON("person", &p) + if err != nil { + t.Error(err) + } + if p.Name != "Gopher" { + t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name) + } + if p.Age != 5 { + t.Errorf("Env.GetJSON(%q): wrong age. Want %d. 
Got %d", "person", 5, p.Age) + } +} + +func TestGetJSONAbsent(t *testing.T) { + var l []string + var env Env + err := env.GetJSON("person", &l) + if err != nil { + t.Error(err) + } + if l != nil { + t.Errorf("Env.GetJSON(): get unexpected list %v", l) + } +} + +func TestGetJSONFailure(t *testing.T) { + var p []string + var env Env + env.Set("list-person", `{"name":"Gopher","age":5}`) + err := env.GetJSON("list-person", &p) + if err == nil { + t.Errorf("Env.GetJSON(%q): got unexpected error.", "list-person") + } +} + +func TestSetJSON(t *testing.T) { + var p1 = struct { + Name string `json:"name"` + Age int `json:"age"` + }{Name: "Gopher", Age: 5} + var env Env + err := env.SetJSON("person", p1) + if err != nil { + t.Error(err) + } + var p2 struct { + Name string `json:"name"` + Age int `json:"age"` + } + err = env.GetJSON("person", &p2) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(p1, p2) { + t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2) + } +} + +func TestSetJSONFailure(t *testing.T) { + var env Env + err := env.SetJSON("person", unmarshable{}) + if err == nil { + t.Error("Env.SetJSON(): got unexpected error") + } + if env.Exists("person") { + t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person") + } +} + +func TestGetList(t *testing.T) { + var tests = []struct { + input string + expected []string + }{ + {"WAT=wat", []string{"wat"}}, + {`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}}, + {"WAT=", nil}, + } + for _, tt := range tests { + env := Env([]string{tt.input}) + got := env.GetList("WAT") + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("Env.GetList(%q): wrong result. Want %v. 
Got %v", "WAT", tt.expected, got) + } + } +} + +func TestSetList(t *testing.T) { + list := []string{"a", "b", "c"} + var env Env + if err := env.SetList("SOME", list); err != nil { + t.Error(err) + } + if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) { + t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got) + } +} + +func TestSet(t *testing.T) { + var env Env + env.Set("PATH", "/home/bin:/bin") + env.Set("SOMETHING", "/usr/bin") + env.Set("PATH", "/bin") + if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected { + t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) + } + if expected, got := "/bin", env.Get("PATH"); got != expected { + t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) + } +} + +func TestDecode(t *testing.T) { + var tests = []struct { + input string + expectedOut []string + expectedErr string + }{ + { + `{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`, + []string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`}, + "", + }, + {"}}", nil, "invalid character '}' looking for beginning of value"}, + {`{}`, nil, ""}, + } + for _, tt := range tests { + var env Env + err := env.Decode(bytes.NewBufferString(tt.input)) + if tt.expectedErr == "" { + if err != nil { + t.Error(err) + } + } else if tt.expectedErr != err.Error() { + t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err) + } + got := []string(env) + sort.Strings(got) + sort.Strings(tt.expectedOut) + if !reflect.DeepEqual(got, tt.expectedOut) { + t.Errorf("Env.Decode(): wrong result. Want %v. 
Got %v.", tt.expectedOut, got) + } + } +} + +func TestSetAuto(t *testing.T) { + buf := bytes.NewBufferString("oi") + var tests = []struct { + input interface{} + expected string + }{ + {10, "10"}, + {10.3, "10"}, + {"oi", "oi"}, + {buf, "{}"}, + {unmarshable{}, "{}"}, + } + for _, tt := range tests { + var env Env + env.SetAuto("SOME", tt.input) + if got := env.Get("SOME"); got != tt.expected { + t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) + } + } +} + +func TestMap(t *testing.T) { + var tests = []struct { + input []string + expected map[string]string + }{ + {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}}, + {nil, nil}, + } + for _, tt := range tests { + env := Env(tt.input) + got := env.Map() + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got) + } + } +} + +type unmarshable struct { +} + +func (unmarshable) MarshalJSON() ([]byte, error) { + return nil, errors.New("cannot marshal") +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go new file mode 100644 index 000000000..5a85983cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go @@ -0,0 +1,305 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/http/httputil" + "sync" + "sync/atomic" + "time" +) + +// APIEvents represents an event returned by the API. 
+type APIEvents struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty"` + ID string `json:"ID,omitempty" yaml:"ID,omitempty"` + From string `json:"From,omitempty" yaml:"From,omitempty"` + Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"` +} + +type eventMonitoringState struct { + sync.RWMutex + sync.WaitGroup + enabled bool + lastSeen *int64 + C chan *APIEvents + errC chan error + listeners []chan<- *APIEvents +} + +const ( + maxMonitorConnRetries = 5 + retryInitialWaitTime = 10. +) + +var ( + // ErrNoListeners is the error returned when no listeners are available + // to receive an event. + ErrNoListeners = errors.New("no listeners present to receive event") + + // ErrListenerAlreadyExists is the error returned when the listerner already + // exists. + ErrListenerAlreadyExists = errors.New("listener already exists for docker events") + + // EOFEvent is sent when the event listener receives an EOF error. + EOFEvent = &APIEvents{ + Status: "EOF", + } +) + +// AddEventListener adds a new listener to container events in the Docker API. +// +// The parameter is a channel through which events will be sent. +func (c *Client) AddEventListener(listener chan<- *APIEvents) error { + var err error + if !c.eventMonitor.isEnabled() { + err = c.eventMonitor.enableEventMonitoring(c) + if err != nil { + return err + } + } + err = c.eventMonitor.addListener(listener) + if err != nil { + return err + } + return nil +} + +// RemoveEventListener removes a listener from the monitor. 
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error { + err := c.eventMonitor.removeListener(listener) + if err != nil { + return err + } + if len(c.eventMonitor.listeners) == 0 { + c.eventMonitor.disableEventMonitoring() + } + return nil +} + +func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + return ErrListenerAlreadyExists + } + eventState.Add(1) + eventState.listeners = append(eventState.listeners, listener) + return nil +} + +func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + var newListeners []chan<- *APIEvents + for _, l := range eventState.listeners { + if l != listener { + newListeners = append(newListeners, l) + } + } + eventState.listeners = newListeners + eventState.Add(-1) + } + return nil +} + +func (eventState *eventMonitoringState) closeListeners() { + for _, l := range eventState.listeners { + close(l) + eventState.Add(-1) + } + eventState.listeners = nil +} + +func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { + for _, b := range *list { + if b == a { + return true + } + } + return false +} + +func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { + eventState.Lock() + defer eventState.Unlock() + if !eventState.enabled { + eventState.enabled = true + var lastSeenDefault = int64(0) + eventState.lastSeen = &lastSeenDefault + eventState.C = make(chan *APIEvents, 100) + eventState.errC = make(chan error, 1) + go eventState.monitorEvents(c) + } + return nil +} + +func (eventState *eventMonitoringState) disableEventMonitoring() error { + eventState.Lock() + defer eventState.Unlock() + + eventState.closeListeners() + + eventState.Wait() + + if eventState.enabled { + eventState.enabled = false + 
close(eventState.C) + close(eventState.errC) + } + return nil +} + +func (eventState *eventMonitoringState) monitorEvents(c *Client) { + var err error + for eventState.noListeners() { + time.Sleep(10 * time.Millisecond) + } + if err = eventState.connectWithRetry(c); err != nil { + // terminate if connect failed + eventState.disableEventMonitoring() + return + } + for eventState.isEnabled() { + timeout := time.After(100 * time.Millisecond) + select { + case ev, ok := <-eventState.C: + if !ok { + return + } + if ev == EOFEvent { + eventState.disableEventMonitoring() + return + } + eventState.updateLastSeen(ev) + go eventState.sendEvent(ev) + case err = <-eventState.errC: + if err == ErrNoListeners { + eventState.disableEventMonitoring() + return + } else if err != nil { + defer func() { go eventState.monitorEvents(c) }() + return + } + case <-timeout: + continue + } + } +} + +func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { + var retries int + var err error + for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { + waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) + } + return err +} + +func (eventState *eventMonitoringState) noListeners() bool { + eventState.RLock() + defer eventState.RUnlock() + return len(eventState.listeners) == 0 +} + +func (eventState *eventMonitoringState) isEnabled() bool { + eventState.RLock() + defer eventState.RUnlock() + return eventState.enabled +} + +func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { + eventState.RLock() + defer eventState.RUnlock() + eventState.Add(1) + defer eventState.Done() + if eventState.enabled { + if len(eventState.listeners) == 0 { + eventState.errC <- ErrNoListeners + return + } + + for _, listener 
:= range eventState.listeners { + listener <- event + } + } +} + +func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { + eventState.Lock() + defer eventState.Unlock() + if atomic.LoadInt64(eventState.lastSeen) < e.Time { + atomic.StoreInt64(eventState.lastSeen, e.Time) + } +} + +func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { + uri := "/events" + if startTime != 0 { + uri += fmt.Sprintf("?since=%d", startTime) + } + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + var err error + if c.TLSConfig == nil { + dial, err = net.Dial(protocol, address) + } else { + dial, err = tls.Dial(protocol, address, c.TLSConfig) + } + if err != nil { + return err + } + conn := httputil.NewClientConn(dial, nil) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := conn.Do(req) + if err != nil { + return err + } + go func(res *http.Response, conn *httputil.ClientConn) { + defer conn.Close() + defer res.Body.Close() + decoder := json.NewDecoder(res.Body) + for { + var event APIEvents + if err = decoder.Decode(&event); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + if c.eventMonitor.isEnabled() { + // Signal that we're exiting. + eventChan <- EOFEvent + } + break + } + errChan <- err + } + if event.Time == 0 { + continue + } + if !c.eventMonitor.isEnabled() { + return + } + eventChan <- &event + } + }(res, conn) + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go new file mode 100644 index 000000000..a308538cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go @@ -0,0 +1,132 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bufio" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestEventListeners(t *testing.T) { + testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient) +} + +func TestTLSEventListeners(t *testing.T) { + testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server { + server := httptest.NewUnstartedServer(handler) + + cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem") + if err != nil { + t.Fatalf("Error loading server key pair: %s", err) + } + + caCert, err := ioutil.ReadFile("testing/data/ca.pem") + if err != nil { + t.Fatalf("Error loading ca certificate: %s", err) + } + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(caCert) { + t.Fatalf("Could not add ca certificate") + } + + server.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caPool, + } + server.StartTLS() + return server + }, func(url string) (*Client, error) { + return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem") + }) +} + +func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) { + response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} +{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} +` + + server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rsc := bufio.NewScanner(strings.NewReader(response)) + for rsc.Scan() { + w.Write([]byte(rsc.Text())) + w.(http.Flusher).Flush() + 
time.Sleep(10 * time.Millisecond) + } + })) + defer server.Close() + + client, err := buildClient(server.URL) + if err != nil { + t.Errorf("Failed to create client: %s", err) + } + client.SkipServerVersionCheck = true + + listener := make(chan *APIEvents, 10) + defer func() { + time.Sleep(10 * time.Millisecond) + if err := client.RemoveEventListener(listener); err != nil { + t.Error(err) + } + }() + + err = client.AddEventListener(listener) + if err != nil { + t.Errorf("Failed to add event listener: %s", err) + } + + timeout := time.After(1 * time.Second) + var count int + + for { + select { + case msg := <-listener: + t.Logf("Received: %v", *msg) + count++ + err = checkEvent(count, msg) + if err != nil { + t.Fatalf("Check event failed: %s", err) + } + if count == 4 { + return + } + case <-timeout: + t.Fatalf("%s timed out waiting on events", testName) + } + } +} + +func checkEvent(index int, event *APIEvents) error { + if event.ID != "dfdf82bd3881" { + return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID) + } + if event.From != "base:latest" { + return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From) + } + var status string + switch index { + case 1: + status = "create" + case 2: + status = "start" + case 3: + status = "stop" + case 4: + status = "destroy" + } + if event.Status != status { + return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go new file mode 100644 index 000000000..8c2c719e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go @@ -0,0 +1,168 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "time" + + "github.com/fsouza/go-dockerclient" +) + +func ExampleClient_AttachToContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + client.SkipServerVersionCheck = true + // Reading logs from container a84849 and sending them to buf. + var buf bytes.Buffer + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Logs: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) + buf.Reset() + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Stdout: true, + Stream: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_CopyFromContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + cid := "a84849" + var buf bytes.Buffer + filename := "/tmp/output.txt" + err = client.CopyFromContainer(docker.CopyFromContainerOptions{ + Container: cid, + Resource: filename, + OutputStream: &buf, + }) + if err != nil { + log.Fatalf("Error while copying from %s: %s\n", cid, err) + } + content := new(bytes.Buffer) + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + tr.Next() + if err != nil && err != io.EOF { + log.Fatal(err) + } + if _, err := io.Copy(content, tr); err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_BuildImage() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + t := time.Now() + inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + tr := tar.NewWriter(inputbuf) + tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) + tr.Write([]byte("FROM base\n")) + tr.Close() + opts := docker.BuildImageOptions{ + 
Name: "test", + InputStream: inputbuf, + OutputStream: outputbuf, + } + if err := client.BuildImage(opts); err != nil { + log.Fatal(err) + } +} + +func ExampleClient_ListenEvents() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + listener := make(chan *docker.APIEvents) + err = client.AddEventListener(listener) + if err != nil { + log.Fatal(err) + } + + defer func() { + + err = client.RemoveEventListener(listener) + if err != nil { + log.Fatal(err) + } + + }() + + timeout := time.After(1 * time.Second) + + for { + select { + case msg := <-listener: + log.Println(msg) + case <-timeout: + break + } + } + +} + +func ExampleEnv_Map() { + e := docker.Env([]string{"A=1", "B=2", "C=3"}) + envs := e.Map() + for k, v := range envs { + fmt.Printf("%s=%q\n", k, v) + } +} + +func ExampleEnv_SetJSON() { + type Person struct { + Name string + Age int + } + p := Person{Name: "Gopher", Age: 4} + var e docker.Env + err := e.SetJSON("person", p) + if err != nil { + log.Fatal(err) + } +} + +func ExampleEnv_GetJSON() { + type Person struct { + Name string + Age int + } + p := Person{Name: "Gopher", Age: 4} + var e docker.Env + e.Set("person", `{"name":"Gopher","age":4}`) + err := e.GetJSON("person", &p) + if err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go new file mode 100644 index 000000000..bc7c5cfcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go @@ -0,0 +1,185 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Docs can currently be found at https://github.com/docker/docker/blob/master/docs/sources/reference/api/docker_remote_api_v1.15.md#exec-create + +package docker + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" +) + +// CreateExecOptions specify parameters to the CreateExecContainer function. +// +// See http://goo.gl/8izrzI for more details +type CreateExecOptions struct { + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` +} + +// StartExecOptions specify parameters to the StartExecContainer function. +// +// See http://goo.gl/JW8Lxl for more details +type StartExecOptions struct { + Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` + + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} `json:"-"` +} + +// Exec is the type representing a `docker exec` instance and containing the +// instance ID +type Exec struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty"` +} + +// ExecProcessConfig is a type describing the command associated to a Exec +// instance. It's used in the ExecInspect type. 
+// +// See http://goo.gl/ypQULN for more details +type ExecProcessConfig struct { + Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` + User string `json:"user,omitempty" yaml:"user,omitempty"` + Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` + EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` + Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"` +} + +// ExecInspect is a type with details about a exec instance, including the +// exit code if the command has finished running. It's returned by a api +// call to /exec/(id)/json +// +// See http://goo.gl/ypQULN for more details +type ExecInspect struct { + ID string `json:"ID,omitempty" yaml:"ID,omitempty"` + Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` + ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"` + OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"` + ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"` + Container Container `json:"Container,omitempty" yaml:"Container,omitempty"` +} + +// CreateExec sets up an exec instance in a running container `id`, returning the exec +// instance, or an error in case of failure. +// +// See http://goo.gl/8izrzI for more details +func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { + path := fmt.Sprintf("/containers/%s/exec", opts.Container) + body, status, err := c.do("POST", path, doOptions{data: opts}) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return nil, err + } + var exec Exec + err = json.Unmarshal(body, &exec) + if err != nil { + return nil, err + } + + return &exec, nil +} + +// StartExec starts a previously set up exec instance id. 
If opts.Detach is +// true, it returns after starting the exec command. Otherwise, it sets up an +// interactive session with the exec command. +// +// See http://goo.gl/JW8Lxl for more details +func (c *Client) StartExec(id string, opts StartExecOptions) error { + if id == "" { + return &NoSuchExec{ID: id} + } + + path := fmt.Sprintf("/exec/%s/start", id) + + if opts.Detach { + _, status, err := c.do("POST", path, doOptions{data: opts}) + if status == http.StatusNotFound { + return &NoSuchExec{ID: id} + } + if err != nil { + return err + } + return nil + } + + return c.hijack("POST", path, hijackOptions{ + success: opts.Success, + setRawTerminal: opts.RawTerminal, + in: opts.InputStream, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + data: opts, + }) +} + +// ResizeExecTTY resizes the tty session used by the exec command id. This API +// is valid only if Tty was specified as part of creating and starting the exec +// command. +// +// See http://goo.gl/YDSx1f for more details +func (c *Client) ResizeExecTTY(id string, height, width int) error { + params := make(url.Values) + params.Set("h", strconv.Itoa(height)) + params.Set("w", strconv.Itoa(width)) + + path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) + _, _, err := c.do("POST", path, doOptions{}) + return err +} + +// InspectExec returns low-level information about the exec command id. +// +// See http://goo.gl/ypQULN for more details +func (c *Client) InspectExec(id string) (*ExecInspect, error) { + path := fmt.Sprintf("/exec/%s/json", id) + body, status, err := c.do("GET", path, doOptions{}) + if status == http.StatusNotFound { + return nil, &NoSuchExec{ID: id} + } + if err != nil { + return nil, err + } + var exec ExecInspect + err = json.Unmarshal(body, &exec) + if err != nil { + return nil, err + } + return &exec, nil +} + +// NoSuchExec is the error returned when a given exec instance does not exist. 
+type NoSuchExec struct { + ID string +} + +func (err *NoSuchExec) Error() string { + return "No such exec instance: " + err.ID +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go new file mode 100644 index 000000000..2dc8d2100 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go @@ -0,0 +1,262 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" +) + +func TestExecCreate(t *testing.T) { + jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` + var expected struct{ ID string } + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := CreateExecOptions{ + Container: "test", + AttachStdin: true, + AttachStdout: true, + AttachStderr: false, + Tty: false, + Cmd: []string{"touch", "/tmp/file"}, + User: "a-user", + } + execObj, err := client.CreateExec(config) + if err != nil { + t.Fatal(err) + } + expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + if execObj.ID != expectedID { + t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/test/exec")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecCreate: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) + } + var gotBody struct{ ID string } + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } +} + +func TestExecStartDetached(t *testing.T) { + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + config := StartExecOptions{ + Detach: true, + } + err := client.StartExec(execID, config) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } + t.Log(req.Body) + var gotBody struct{ Detach bool } + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } + if !gotBody.Detach { + t.Fatal("Expected Detach in StartExecOptions to be true") + } +} + +func TestExecStartAndAttach(t *testing.T) { + var reader = strings.NewReader("send value") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + success := make(chan struct{}) + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := StartExecOptions{ + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + RawTerminal: true, + Success: success, + } + go func() { + if err := client.StartExec(execID, opts); err != nil { + t.Error(err) + } + }() + <-success +} + +func TestExecResize(t *testing.T) { + execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + 
fakeRT := &FakeRoundTripper{status: http.StatusOK} + client := newTestClient(fakeRT) + err := client.ResizeExecTTY(execID, 10, 20) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20")) + if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() { + t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } +} + +func TestExecInspect(t *testing.T) { + jsonExec := `{ + "ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e", + "Running": true, + "ExitCode": 0, + "ProcessConfig": { + "privileged": false, + "user": "", + "tty": true, + "entrypoint": "bash", + "arguments": [] + }, + "OpenStdin": true, + "OpenStderr": true, + "OpenStdout": true, + "Container": { + "State": { + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Pid": 29392, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-01-21T17:08:59.634662178Z", + "FinishedAt": "0001-01-01T00:00:00Z" + }, + "ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521", + "Created": "2015-01-21T17:08:59.46407212Z", + "Path": "/bin/bash", + "Args": [ + "-lc", + "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" + ], + "Config": { + "Hostname": "922cd0568714", + "Domainname": "", + "User": "ubuntu", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 100, + "Cpuset": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": { + "8888/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/bash", + "-lc", + "tsuru_unit_agent 
http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" + ], + "Image": "tsuru/app-gtest", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null + }, + "Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544", + "NetworkSettings": { + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "MacAddress": "02:42:ac:11:00:08", + "LinkLocalIPv6Address": "fe80::42:acff:fe11:8", + "LinkLocalIPv6PrefixLen": 64, + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "Gateway": "172.17.42.1", + "IPv6Gateway": "", + "Bridge": "docker0", + "PortMapping": null, + "Ports": { + "8888/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49156" + } + ] + } + }, + "ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname", + "HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts", + "Name": "/c7e43b72288ee9d0270a", + "Driver": "aufs", + "ExecDriver": "native-0.2", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "", + "RestartCount": 0, + "UpdateDns": false, + "Volumes": {}, + "VolumesRW": {} + } + }` + var expected ExecInspect + err := json.Unmarshal([]byte(jsonExec), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK} + client := newTestClient(fakeRT) + expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e" + execObj, err := client.InspectExec(expectedID) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*execObj, expected) { + t.Errorf("ExecInspect: Expected %#v. 
Got %#v.", expected, *execObj) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("ExecInspect: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..a38715497 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,26 @@ +# (Unreleased) + +logrus/core: improve performance of text formatter by 40% +logrus/core: expose `LevelHooks` type + +# 0.8.2 + +logrus: fix more Fatal family functions + +# 0.8.1 + +logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +logrus: defaults to stderr instead of stdout +hooks/sentry: add special field for `*http.Request` +formatter/text: ignore Windows for colors + +# 0.7.3 + +formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal 
+in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..4be378476 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,355 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. 
The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. 
We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` + + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. 
| +| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. 
+ +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&logrus.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). 
+ + ```go + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) + ``` + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ÅÌ Í•Í–ÌšfÌÍÌ  ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗lÍ–ÍŽg̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. 
+ + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..699ea035c --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,254 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add a single field to the Entry. 
+func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +func (entry *Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 000000000..cd90aa7dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..a67e1b802 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,188 @@ +package logrus + 
+import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) 
+} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) 
+} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..104d689f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. 
+type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000..c6d290c77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + 
"this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 000000000..938b97495 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func 
(hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..2ad6dc5cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000..1d7087325 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" 
{ + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..e4974bfbe --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,206 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. 
It's common to set this to a + // file, or leave it default which is `os.Stdout`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Ff you want multiple fields, use `WithFields`. 
+func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) 
+ } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) 
+ } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..43ee12e90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,94 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. 
+ // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var _ StdLogger = &log.Logger{} + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000..e8719b090 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,301 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields 
map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields 
Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t 
*testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, 
"panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..71f8d67a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go new file mode 100644 index 000000000..0428ee5d5 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go @@ -0,0 +1,20 @@ +/* + Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +*/ +package logrus + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..a2c0b40db --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..b8bebc13e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux darwin freebsd openbsd + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go new file mode 100644 index 000000000..af609a53d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go @@ -0,0 +1,7 @@ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..2e09f6f7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..2e6fe1bdd --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,158 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, "msg", entry.Message) + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return true +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + 
b.WriteString(key) + b.WriteByte('=') + + switch value := value.(type) { + case string: + if needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", value) + } + default: + fmt.Fprint(b, value) + } + + b.WriteByte(' ') +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000..e25a44f67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : 
timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..1e30b1c75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go new file mode 100644 index 000000000..b854227e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go @@ -0,0 +1,62 @@ +package opts + +import ( 
+ "bufio" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + // EnvironmentVariableRegexp A regexp to validate correct environment variables + // Environment variables set by the user must have a name consisting solely of + // alphabetics, numerics, and underscores - the first of which must not be numeric. + EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") +) + +// ParseEnvFile Read in a line delimited file with environment variables enumerated +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + + if !EnvironmentVariableRegexp.MatchString(variable) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)} + } + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. 
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go new file mode 100644 index 000000000..cd0ca8f32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go @@ -0,0 +1,133 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// 
Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable" + if err.Error() != expectedMessage { + t.Fatalf("Expected 
[%v], got [%v]", expectedMessage, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 000000000..a29335e60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package opts + +import "fmt" + +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 000000000..55eac2aac --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package opts + +import "fmt" + +var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go new file mode 100644 index 000000000..b1f958755 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go @@ -0,0 +1,35 @@ +package opts + +import ( + "fmt" + "net" +) + +// IpOpt type that hold an IP +type IpOpt struct { + *net.IP +} + +func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { + o := &IpOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +func (o *IpOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +func (o *IpOpt) String() 
string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go new file mode 100644 index 000000000..b6b526a57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIpOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIpOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIpOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IpOpt{IP: &ip} + + invalidIp := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIp) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go new file 
mode 100644 index 000000000..aa409b99e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go @@ -0,0 +1,323 @@ +package opts + +import ( + "fmt" + "net" + "os" + "path" + "regexp" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 + DefaultHTTPHost = "127.0.0.1" + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp:// + // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter + // is not supplied. A better longer term solution would be to use a named + // pipe as the default on the Windows daemon. + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" +) + +// ListOpts type that hold a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts Create a new ListOpts with the specified validator. 
+func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and add it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete remove the given element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +// FIXME: can we remove this? +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values' slice. +// FIXME: Can we remove this? +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// Get checks the existence of the given key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +//MapOpts type that holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. 
+func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// ValidatorFctType validator that return a validate string and/or an error +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType validator that return a validate list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateAttach Validates that the specified string is a valid attach option. +func ValidateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") +} + +// ValidateLink Validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + if _, _, err := parsers.ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidateDevice Validate a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +func ValidateDevice(val string) (string, error) { + return validatePath(val, false) +} + +// ValidatePath Validate a path for volumes +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:rw|ro] +// It will also validate the mount mode. 
+func ValidatePath(val string) (string, error) { + return validatePath(val, true) +} + +func validatePath(val string, validateMountMode bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + + splited := strings.SplitN(val, ":", 3) + if splited[0] == "" { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + switch len(splited) { + case 1: + containerPath = splited[0] + val = path.Clean(containerPath) + case 2: + if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid { + containerPath = splited[0] + mode = splited[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath)) + } + case 3: + containerPath = splited[1] + mode = splited[2] + if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid { + return val, fmt.Errorf("bad mount mode specified : %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// ValidateEnv Validate an environment variable and returns it +// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid. +// If no value is specified, it returns the current value using os.Getenv. 
+func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + if !EnvironmentVariableRegexp.MatchString(arr[0]) { + return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)} + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +// ValidateIPAddress Validates an Ip address +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateMACAddress Validates a MAC address +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} + +// ValidateDNSSearch Validates domain for resolvconf search configuration. +// A zero length domain is represented by . +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateExtraHost Validate that the given string is a valid extrahost and returns it +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateLabel Validate that the given string is a valid label, and returns it +// Labels are in the form on key=value +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ValidateHost Validate that the given string is a valid host and returns it +func ValidateHost(val string) (string, error) { + host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) + if err != nil { + return val, err + } + return host, nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if parts[0] == name { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go new file mode 100644 index 
000000000..f08df30be --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go @@ -0,0 +1,479 @@ +package opts + +import ( + "fmt" + "os" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := 
ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := 
[]string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and 
STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) + } + } + } +} + +func TestValidatePath(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + "/path:rw", + "/path:ro", + "/rw:rw", + } + invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for 
volumes: :/tmp::", + "path:ro": "path is not an absolute path", + "/path:/path:sw": "bad mount mode specified : sw", + "/path:/path:rwz": "bad mount mode specified : rwz", + } + + for _, path := range valid { + if _, err := ValidatePath(path); err != nil { + t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidatePath(path); err == nil { + t.Fatalf("ValidatePath(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for volumes: :/tmp::", + "path:ro": "ro is not an absolute path", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if 
err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestValidateEnv(t *testing.T) { + invalids := map[string]string{ + "some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable", + "asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable", + "1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable", + "123": "poorly formatted environment: variable '123' is not a valid environment variable", + } + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + } + for value, expectedError := range invalids { + _, err := ValidateEnv(value) + if err == nil { + t.Fatalf("Expected ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err) + } + if err.Error() != expectedError { + t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error()) + } + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working 
with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func TestValidateHost(t *testing.T) { + invalid := map[string]string{ + "anything": "Invalid bind address format: anything", + "something with spaces": "Invalid bind address format: something with spaces", + "://": "Invalid bind address format: ://", + "unknown://": "Invalid bind address format: unknown://", + "tcp://": "Invalid proto, expected tcp: ", + "tcp://:port": "Invalid bind address format: :port", + "tcp://invalid": "Invalid bind address format: invalid", + "tcp://invalid:port": "Invalid bind address format: invalid:port", + } + valid := map[string]string{ + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://:2375": "tcp://127.0.0.1:2375", // default ip address + "tcp://:2376": "tcp://127.0.0.1:2376", // default ip address + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix:///var/run/docker.sock", // default unix:// value + "unix://path/to/socket": "unix://path/to/socket", + } + + for value, errorMessage := range invalid { + if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage { + t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) + } + } + for value, expected := range valid { + if actual, err := ValidateHost(value); err != nil || actual != expected { + t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func logOptsValidator(val string) 
(string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go new file mode 100644 index 000000000..54f6c4e3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" +) + +type UlimitOpt struct { + values *map[string]*ulimit.Ulimit +} + +func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*ulimit.Ulimit{} + } + return &UlimitOpt{ref} +} + +func (o *UlimitOpt) Set(val string) error { + l, err := ulimit.Parse(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +func (o *UlimitOpt) GetList() []*ulimit.Ulimit { + var ulimits []*ulimit.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go new file mode 100644 index 000000000..ad284e754 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + 
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*ulimit.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 000000000..7306840b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,902 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarChownOptions struct { + UID, GID int + } + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + ChownOpts *TarChownOptions + Name string + IncludeSourceDir bool + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. 
+ breakoutError error +) + +var ( + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} +) + +const ( + Uncompressed Compression = iota + Bzip2 + Gzip + Xz +) + +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return CmdStream(exec.Command(args[0], args[1:]...), archive) +} + +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func CompressStream(dest io.WriteCloser, 
compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! 
+ } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. 
+ // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. 
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } else { + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Debugf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + var renamedRelFilePath string // For when tar.Options.Name is set + for _, include := range options.IncludeFiles { + // We can't use filepath.Join(srcPath, include) because this will + // clean away a trailing "." or "/" which may be important. + walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator)) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. 
IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Debugf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + if !exceptions && f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // TODO Windows: Verify if this needs to be os.Pathseparator + // Rename the base resource + if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { + renamedRelFilePath = relFilePath + } + // Set this to make sure the items underneath also get renamed + if options.Name != "" { + relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + } + return nil + }) + } + }() + + return pipeReader, nil +} + +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". 
+ hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + var r io.Reader = tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, nil) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + if err := archiver.Untar(archive, dst, nil); err != nil { + return err + } + return nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
+func UntarPath(src, dst string) error {
+	return defaultArchiver.UntarPath(src, dst)
+}
+
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+	return defaultArchiver.CopyWithTar(src, dst)
+}
+
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("Can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; er != nil && err == nil { // surface the tar goroutine's failure without masking Untar's error
+			err = er
+		}
+	}()
+	return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is in an operating specific manner depending
+// where the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + if input != nil { + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + // Write stdin if any + go func() { + io.Copy(stdin, input) + stdin.Close() + }() + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + pipeR, pipeW := io.Pipe() + errChan := make(chan []byte) + // Collect stderr, we will use it in case of an error + go func() { + errText, e := ioutil.ReadAll(stderr) + if e != nil { + errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") + } + errChan <- errText + }() + // Copy stdout to the returned pipe + go func() { + _, err := io.Copy(pipeW, stdout) + if err != nil { + pipeW.CloseWithError(err) + } + errText := <-errChan + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) + } else { + pipeW.Close() + } + }() + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 000000000..4bb4f6ff5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,1204 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func 
TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.gz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a gzip file.") + } +} + +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") + } +} + +func TestDecompressStreamXz(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.xz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a xz file.") + } +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { 
+ t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of a uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, err := CmdStream(cmd, nil) + if 
err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, err := CmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := path.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := path.Join(tempFolder, "src") + _, err = os.Create(srcFile) + if err != nil { + t.Fatalf("Fail to create the source file") + } + err = UntarPath(srcFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} 
+ +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := path.Join(destFolder, srcFile) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := path.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should 
throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := path.Join(destFolder, srcFile) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := path.Join(tempFolder, "dest") + invalidSrc := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + 
err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, path.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t 
*testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := path.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := path.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, 
path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
+	}
+
+	tmp, err := ioutil.TempDir("", "docker-test-untar")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmp)
+	if err := Untar(wrap, tmp, nil); err != nil {
+		return nil, err
+	}
+	if _, err := os.Stat(tmp); err != nil {
+		return nil, err
+	}
+
+	return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+	}
+}
+
+func TestTarUntarWithXattr(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+		capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability")
+		if capability == nil || capability[0] != 0x00 { // nil check must short-circuit before indexing
+			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+		}
+	}
+}
+
+func TestTarWithOptions(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		opts       *TarOptions
+		numChanges int
+	}{
+		{&TarOptions{IncludeFiles: []string{"1"}}, 2},
+		{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+		{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+		{&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4},
+	}
+	for _, testCase := range cases {
+		changes, err := tarUntar(t, origin, testCase.opts)
+		if err != nil {
+			t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+		}
+		if len(changes) != testCase.numChanges {
+			t.Errorf("Expected %d changes, got %d for %+v:",
+				testCase.numChanges, len(changes), testCase.opts)
+		}
+	}
+}
+
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
+// use PAX Global Extended Headers.
+// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != 
nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(path.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if 
i1, err = getInode(path.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(path.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + // We need this conversion on ARM64 + return uint64(statT.Nlink), nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + 
os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..5c754373f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go new file mode 100644 index 000000000..18f45c480 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -0,0 +1,60 @@ +// +build !windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..10db4bd00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,50 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "strings" +) + +// canonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go new file mode 100644 index 000000000..72bc71e06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -0,0 +1,65 @@ +// +build windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..c7838e859 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,383 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +type ChangeType int + +const ( + ChangeModify = iota + ChangeAdd + ChangeDelete +) + +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + 
case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, ".wh.") { + originalFile := file[len(".wh."):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +type FileInfo struct { + parent *FileInfo + name string + stat *system.Stat_t + children map[string]*FileInfo + capability []byte + added bool +} + +func (root *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := root + if path == string(os.PathSeparator) { + return root + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. 
This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. 
+func ChangesSize(newDir string, changes []Change) int64 { + var size int64 + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, _ := os.Lstat(file) + if fileInfo != nil && !fileInfo.IsDir() { + size += fileInfo.Size() + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change) (Archive, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000..378cc09c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,285 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. 
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. 
For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. 
Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..35832f087 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. 
If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go new file mode 100644 index 000000000..9d528e614 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -0,0 +1,127 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + //defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", 
"docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, 
error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go new file mode 100644 index 000000000..509bdb2e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go @@ -0,0 +1,495 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + 
{Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no 
changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = 
path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create an directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + 
t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + 
sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func 
TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", 
expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 000000000..dc1ea608b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package archive + +import ( + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 000000000..6026575e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,20 @@ +package archive + +import ( + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + + // Don't look 
at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..576f336ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,308 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. 
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) { + if !HasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// AssertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func AssertsDirectory(path string) bool { + return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path) +} + +// HasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func HasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// SpecifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func SpecifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its +// parent directory and its basename in that directory. +func SplitPathDirEntry(localizedPath string) (dir, base string) { + normalizedPath := filepath.ToSlash(localizedPath) + vol := filepath.VolumeName(normalizedPath) + normalizedPath = normalizedPath[len(vol):] + + if normalizedPath == "/" { + // Specifies the root path. + return filepath.FromSlash(vol + normalizedPath), "." 
+ } + + trimmedPath := vol + strings.TrimRight(normalizedPath, "/") + + dir = filepath.FromSlash(path.Dir(trimmedPath)) + base = filepath.FromSlash(path.Base(trimmedPath)) + + return dir, base +} + +// TarResource archives the resource at the given sourcePath into a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourcePath string) (content Archive, err error) { + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) { + // In the case where the source path is a symbolic link AND it ends + // with a path separator, we will want to evaluate the symbolic link. + trimmedPath := sourcePath[:len(sourcePath)-1] + stat, err := os.Lstat(trimmedPath) + if err != nil { + return nil, err + } + + if stat.Mode()&os.ModeSymlink != 0 { + if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil { + return nil, err + } + } + } + + // Separate the source path between it's directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. 
+type CopyInfo struct { + Path string + Exists bool + IsDir bool +} + +// CopyInfoStatPath stats the given path to create a CopyInfo +// struct representing that resource. If mustExist is true, then +// it is an error if there is no file or directory at the given path. +func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) { + pathInfo := CopyInfo{Path: path} + + fileInfo, err := os.Lstat(path) + + if err == nil { + pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir() + } else if os.IsNotExist(err) && !mustExist { + err = nil + } + + return pathInfo, err +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. 
The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case AssertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// rebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurance of oldBase with newBase at the beginning of entry names. +func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. 
+ rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string) error { + var ( + srcInfo CopyInfo + err error + ) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil { + return err + } + + content, err := TarResource(srcPath) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { + dstInfo, err := CopyInfoStatPath(dstPath, false) + if err != nil { + return err + } + + if !dstInfo.Exists { + // Ensure destination parent dir exists. 
+ dstParent, _ := SplitPathDirEntry(dstPath) + + dstStat, err := os.Lstat(dstParent) + if err != nil { + return err + } + if !dstStat.IsDir() { + return ErrNotDirectory + } + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go new file mode 100644 index 000000000..dd0b32362 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go @@ -0,0 +1,637 @@ +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) 
+ + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q", srcPath, dstPath) + + return CopyResource(srcPath, dstPath) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. 
+// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + content, err := TarResource(filepath.Join(tmpDirA, "file1")) + if err == nil { + content.Close() + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + content, err := TarResource(joinTrailingSep(tmpDirA, "file1")) + if err == nil { + content.Close() + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. 
+ if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. 
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. 
SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
+ dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000..e305b5e4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000..2b775b45c --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000..10a63a051 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,210 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" +) + +func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. 
However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertantly. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, ".wh..wh.") { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. 
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + continue + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, ".wh.") { + originalBase := base[len(".wh."):] + originalPath := filepath.Join(filepath.Dir(path), originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { + return applyLayerHandler(dest, layer, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { + return applyLayerHandler(dest, layer, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go new file mode 100644 index 000000000..01ed43728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go @@ -0,0 +1,190 @@ +package archive + +import ( + "archive/tar" + "testing" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 000000000..a5e08e4ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000..3448569b1 
--- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..e85aac054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go new file mode 100644 index 000000000..f5cacea8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, ArchiveReader(r)) + return err + }, +} + +// 
testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. 
+ fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. 
+ // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000..dfb335c0b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) 
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go new file mode 100644 index 000000000..46ab36697 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i 
< len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 000000000..1b8cadc63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,196 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + 
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty return true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. 
+ return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and remove +// the dst if it exists. 
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. 
+func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 000000000..b544ffbf2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,402 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func 
TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the 
directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = 
os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. 
+func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// An empty string should return true from Empty. 
+func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + 
folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 000000000..dcae17882 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. 
+func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go new file mode 100644 index 000000000..7a95cb2bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 000000000..801132ff3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,14 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + 
return fmt.Fprintf(w, format, value) + } + return 0, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go new file mode 100644 index 000000000..896886329 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 000000000..f231aa9da --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, 
os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx += 1 + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := 
getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. 
Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go new file mode 100644 index 000000000..de495b56d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go @@ -0,0 +1,149 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, 
expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := 
strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 000000000..ff09baad1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,254 @@ +package ioutils + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io" + "math/big" + "sync" + "time" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// bufReader allows the underlying reader to continue to produce +// output by pre-emptively reading from the wrapped reader. 
+// This is achieved by buffering this data in bufReader's +// expanding buffer. +type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte + reuseBuf []byte + maxReuse int64 + resetTimeout time.Duration + bufLenResetThreshold int64 + maxReadDataReset int64 +} + +func NewBufReader(r io.Reader) *bufReader { + var timeout int + if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { + timeout = int(randVal.Int64()) + 180 + } else { + timeout = 300 + } + reader := &bufReader{ + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reuseBuf: make([]byte, 4096), + maxReuse: 1000, + resetTimeout: time.Second * time.Duration(timeout), + bufLenResetThreshold: 100 * 1024, + maxReadDataReset: 10 * 1024 * 1024, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { + reader := &bufReader{ + buf: buffer, + drainBuf: drainBuffer, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + var ( + duration time.Duration + lastReset time.Time + now time.Time + reset bool + bufLen int64 + dataSinceReset int64 + maxBufLen int64 + reuseBufLen int64 + reuseCount int64 + ) + reuseBufLen = int64(len(r.reuseBuf)) + lastReset = time.Now() + for { + n, err := r.reader.Read(r.drainBuf) + dataSinceReset += int64(n) + r.Lock() + bufLen = int64(r.buf.Len()) + if bufLen > maxBufLen { + maxBufLen = bufLen + } + + // Avoid unbounded growth of the buffer over time. + // This has been discovered to be the only non-intrusive + // solution to the unbounded growth of the buffer. + // Alternative solutions such as compression, multiple + // buffers, channels and other similar pieces of code + // were reducing throughput, overall Docker performance + // or simply crashed Docker. 
+ // This solution releases the buffer when specific + // conditions are met to avoid the continuous resizing + // of the buffer for long lived containers. + // + // Move data to the front of the buffer if it's + // smaller than what reuseBuf can store + if bufLen > 0 && reuseBufLen >= bufLen { + n, _ := r.buf.Read(r.reuseBuf) + r.buf.Write(r.reuseBuf[0:n]) + // Take action if the buffer has been reused too many + // times and if there's data in the buffer. + // The timeout is also used as means to avoid doing + // these operations more often or less often than + // required. + // The various conditions try to detect heavy activity + // in the buffer which might be indicators of heavy + // growth of the buffer. + } else if reuseCount >= r.maxReuse && bufLen > 0 { + now = time.Now() + duration = now.Sub(lastReset) + timeoutReached := duration >= r.resetTimeout + + // The timeout has been reached and the + // buffered data couldn't be moved to the front + // of the buffer, so the buffer gets reset. + if timeoutReached && bufLen > reuseBufLen { + reset = true + } + // The amount of buffered data is too high now, + // reset the buffer. + if timeoutReached && maxBufLen >= r.bufLenResetThreshold { + reset = true + } + // Reset the buffer if a certain amount of + // data has gone through the buffer since the + // last reset. + if timeoutReached && dataSinceReset >= r.maxReadDataReset { + reset = true + } + // The buffered data is moved to a fresh buffer, + // swap the old buffer with the new one and + // reset all counters. 
+ if reset { + newbuf := &bytes.Buffer{} + newbuf.ReadFrom(r.buf) + r.buf = newbuf + lastReset = now + reset = false + dataSinceReset = 0 + maxBufLen = 0 + reuseCount = 0 + } + } + if err != nil { + r.err = err + } else { + r.buf.Write(r.drainBuf[0:n]) + } + reuseCount++ + r.wait.Signal() + r.Unlock() + callSchedulerIfNecessary() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 000000000..0a39b6ec6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,216 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader 
always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { + reader, writer := io.Pipe() + + drainBuffer := make([]byte, 1024) + buffer := bytes.Buffer{} + bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + + output, err := 
ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { + reader := strings.NewReader("buffer") + bufreader := NewBufReader(reader) + + if err := bufreader.Close(); err != nil { + t.Fatal(err) + } + +} + +// implements io.ReadCloser +type simpleReaderCloser struct{} + +func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { + return 0, nil +} + +func (r *simpleReaderCloser) Close() error { + return nil +} + +func TestBufReaderCloseWithReaderCloser(t *testing.T) { + reader := &simpleReaderCloser{} + bufreader := NewBufReader(reader) + + err := bufreader.Close() + if err != nil { + t.Fatal(err) + } + +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type repeatedReader struct { + readCount int + maxReads int + data []byte +} + +func newRepeatedReader(max int, data []byte) *repeatedReader { + return &repeatedReader{0, max, data} +} + +func (r *repeatedReader) Read(p []byte) (int, error) { + if r.readCount >= 
r.maxReads { + return 0, io.EOF + } + r.readCount++ + n := copy(p, r.data) + return n, nil +} + +func testWithData(data []byte, reads int) { + reader := newRepeatedReader(reads, data) + bufReader := NewBufReader(reader) + io.Copy(ioutil.Discard, bufReader) +} + +func Benchmark1M10BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(10) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark1M1024BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark10k32KBytesReads(b *testing.B) { + reads := 10000 + readSize := int64(32 * 1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 000000000..3c88f29e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 000000000..c11d02b94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build 
gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + //allow or force Go scheduler to switch context, without explicitly + //forcing this will make it hang when using gccgo implementation + runtime.Gosched() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..25095474d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,47 @@ +package ioutils + +import ( + "io" + "net/http" + "sync" +) + +type WriteFlusher struct { + sync.Mutex + w io.Writer + flusher http.Flusher + flushed bool +} + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + wf.Lock() + defer wf.Unlock() + n, err = wf.w.Write(b) + wf.flushed = true + wf.flusher.Flush() + return n, err +} + +// Flush the stream immediately. 
+func (wf *WriteFlusher) Flush() { + wf.Lock() + defer wf.Unlock() + wf.flushed = true + wf.flusher.Flush() +} + +func (wf *WriteFlusher) Flushed() bool { + wf.Lock() + defer wf.Unlock() + return wf.flushed +} + +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var flusher http.Flusher + if f, ok := w.(http.Flusher); ok { + flusher = f + } else { + flusher = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: flusher} +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 000000000..43fdc44ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,60 @@ +package ioutils + +import "io" + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// Wrap a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". 
+// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 000000000..564b1cd4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." 
+ totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE new file mode 100644 index 000000000..ac74d8f04 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md new file mode 100644 index 000000000..da00efa33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md @@ -0,0 +1,40 @@ +Package mflag (aka multiple-flag) implements command-line flag parsing. +It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) + +It adds: + +* both short and long flag version +`./example -s red` `./example --string blue` + +* multiple names for the same option +``` +$>./example -h +Usage of example: + -s, --string="": a simple string +``` + +___ +It is very flexible on purpose, so you can do things like: +``` +$>./example -h +Usage of example: + -s, -string, --string="": a simple string +``` + +Or: +``` +$>./example -h +Usage of example: + -oldflag, --newflag="": a simple string +``` + +You can also hide some flags from the usage, so if we want only `--newflag`: +``` +$>./example -h +Usage of example: + --newflag="": a simple string +$>./example -oldflag str +str +``` + +See [example.go](example/example.go) for more details. 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go new file mode 100644 index 000000000..ebfa35010 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go @@ -0,0 +1,1201 @@ +// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mflag implements command-line flag parsing. +// +// Usage: +// +// Define flags using flag.String(), Bool(), Int(), etc. +// +// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. +// import "flag /github.com/docker/docker/pkg/mflag" +// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") +// If you like, you can bind the flag to a variable using the Var() functions. +// var flagvar int +// func init() { +// // -flaghidden will work, but will be hidden from the usage +// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") +// } +// Or you can create custom flags that satisfy the Value interface (with +// pointer receivers) and couple them to flag parsing by +// flag.Var(&flagVal, []string{"name"}, "help message for flagname") +// For such flags, the default value is just the initial value of the variable. +// +// You can also add "deprecated" flags, they are still usable, but are not shown +// in the usage and will display a warning when you try to use them. 
`#` before +// an option means this option is deprecated, if there is an following option +// without `#` ahead, then that's the replacement, if not, it will just be removed: +// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") +// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or +// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` +// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") +// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` +// so you can only use `-f`. +// +// You can also group one letter flags, bif you declare +// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") +// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") +// you will be able to use the -vs or -sv +// +// After all flags are defined, call +// flag.Parse() +// to parse the command line into the defined flags. +// +// Flags may then be used directly. If you're using the flags themselves, +// they are all pointers; if you bind to variables, they're values. +// fmt.Println("ip has value ", *ip) +// fmt.Println("flagvar has value ", flagvar) +// +// After parsing, the arguments after the flag are available as the +// slice flag.Args() or individually as flag.Arg(i). +// The arguments are indexed from 0 through flag.NArg()-1. +// +// Command line flag syntax: +// -flag +// -flag=x +// -flag="x" +// -flag='x' +// -flag x // non-boolean flags only +// One or two minus signs may be used; they are equivalent. +// The last form is not permitted for boolean flags because the +// meaning of the command +// cmd -x * +// will change if there is a file called 0, false, etc. You must +// use the -flag=false form to turn off a boolean flag. 
+// +// Flag parsing stops just before the first non-flag argument +// ("-" is a non-flag argument) or after the terminator "--". +// +// Integer flags accept 1234, 0664, 0x1234 and may be negative. +// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. +// Duration flags accept any input valid for time.ParseDuration. +// +// The default set of command-line flags is controlled by +// top-level functions. The FlagSet type allows one to define +// independent sets of flags, such as to implement subcommands +// in a command-line interface. The methods of FlagSet are +// analogous to the top-level functions for the command-line +// flag set. + +package mflag + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("flag: help requested") + +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i 
*intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f 
*float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +// ErrorHandling strategies available when a flag parsing error occurs +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. 
+ Usage func() + ShortUsage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use Out() accessor + nArgRequirements []nArgRequirement +} + +// A Flag represents the state of a flag. +type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list flagSlice + + // The sorted list is based on the first name, when flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n + if len(f.Names) == 1 { + list = append(list, fName) + continue + } + + found := false + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + sort.Sort(list) + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[nameMap[name]] + } + return result +} + +// Name returns the name of the FlagSet. +func (fs *FlagSet) Name() string { + return fs.name +} + +// Out returns the destination for usage and error messages. +func (fs *FlagSet) Out() io.Writer { + if fs.output == nil { + return os.Stderr + } + return fs.output +} + +// SetOutput sets the destination for usage and error messages. 
+// If output is nil, os.Stderr is used. +func (fs *FlagSet) SetOutput(output io.Writer) { + fs.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (fs *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(fs.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (fs *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(fs.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (fs *FlagSet) Lookup(name string) *Flag { + return fs.formal[name] +} + +// IsSet indicates whether the specified flag is set in the given FlagSet +func (fs *FlagSet) IsSet(name string) bool { + return fs.actual[name] != nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// IsSet indicates whether the specified flag was specified at all on the cmd line. +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + +type nArgRequirementType int + +// Indicator used to pass to BadArgs function +const ( + Exact nArgRequirementType = iota + Max + Min +) + +type nArgRequirement struct { + Type nArgRequirementType + N int +} + +// Require adds a requirement about the number of arguments for the FlagSet. 
+// The first parameter can be Exact, Max, or Min to respectively specify the exact, +// the maximum, or the minimal number of arguments required. +// The actual check is done in FlagSet.CheckArgs(). +func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { + fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) +} + +// CheckArgs uses the requirements set by FlagSet.Require() to validate +// the number of arguments. If the requirements are not met, +// an error message string is returned. +func (fs *FlagSet) CheckArgs() (message string) { + for _, req := range fs.nArgRequirements { + var arguments string + if req.N == 1 { + arguments = "1 argument" + } else { + arguments = fmt.Sprintf("%d arguments", req.N) + } + + str := func(kind string) string { + return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) + } + + switch req.Type { + case Exact: + if fs.NArg() != req.N { + return str("") + } + case Max: + if fs.NArg() > req.N { + return str("a maximum of ") + } + case Min: + if fs.NArg() < req.N { + return str("a minimum of ") + } + } + } + return "" +} + +// Set sets the value of the named flag. +func (fs *FlagSet) Set(name, value string) error { + flag, ok := fs.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if err := flag.Value.Set(value); err != nil { + return err + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. 
+func (fs *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) + home := homedir.Get() + + // Don't substitute when HOME is / + if runtime.GOOS != "windows" && home == "/" { + home = "" + } + + // Add a blank line between cmd description and list of options + if fs.FlagCount() > 0 { + fmt.Fprintln(writer, "") + } + + fs.VisitAll(func(flag *Flag) { + format := " -%s=%s" + names := []string{} + for _, name := range flag.Names { + if name[0] != '#' { + names = append(names, name) + } + } + if len(names) > 0 && len(flag.Usage) > 0 { + val := flag.DefValue + + if home != "" && strings.HasPrefix(val, home) { + val = homedir.GetShortcutString() + val[len(home):] + } + + fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) + for i, line := range strings.Split(flag.Usage, "\n") { + if i != 0 { + line = " " + line + } + fmt.Fprintln(writer, "\t", line) + } + } + }) + writer.Flush() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(fs *FlagSet) { + if fs.name == "" { + fmt.Fprintf(fs.Out(), "Usage:\n") + } else { + fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) + } + fs.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// Usage prints to standard error a usage message documenting the standard command layout +// The function is a variable that may be changed to point to a custom function. 
+var ShortUsage = func() { + fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) +} + +// FlagCount returns the number of flags that have been defined. +func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } + +// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. +func (fs *FlagSet) FlagCountUndeprecated() int { + count := 0 + for _, flag := range sortFlags(fs.formal) { + for _, name := range flag.Names { + if name[0] != '#' { + count++ + break + } + } + } + return count +} + +// NFlag returns the number of flags that have been set. +func (fs *FlagSet) NFlag() int { return len(fs.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (fs *FlagSet) Arg(i int) string { + if i < 0 || i >= len(fs.args) { + return "" + } + return fs.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (fs *FlagSet) NArg() int { return len(fs.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (fs *FlagSet) Args() []string { return fs.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. 
+func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + fs.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + fs.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { + fs.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
+func (fs *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + fs.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + fs.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + fs.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. 
+func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + fs.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + fs.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + fs.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + fs.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { + fs.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (fs *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + fs.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. 
+func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + fs.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + fs.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + fs.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. 
+// The return value is the address of a time.Duration variable that stores the value of the flag. +func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + fs.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (fs *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := fs.formal[name] + if alreadythere { + var msg string + if fs.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) + } + fmt.Fprintln(fs.Out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if fs.formal == nil { + fs.formal = make(map[string]*Flag) + } + fs.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. 
For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (fs *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(fs.Out(), err) + if os.Args[0] == fs.name { + fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) + } else { + fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (fs *FlagSet) usage() { + if fs == CommandLine { + Usage() + } else if fs.Usage == nil { + defaultUsage(fs) + } else { + fs.Usage() + } +} + +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. + if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + +// parseOne parses one flag. It reports whether a flag was seen. 
+func (fs *FlagSet) parseOne() (bool, string, error) { + if len(fs.args) == 0 { + return false, "", nil + } + s := fs.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, "", nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + fs.args = fs.args[1:] + return false, "", nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, "", fs.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + fs.args = fs.args[1:] + hasValue := false + value := "" + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + hasValue = true + name = name[:i] + } + + m := fs.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. + fs.usage() + return false, "", ErrHelp + } + if len(name) > 0 && name[0] == '-' { + return false, "", fs.failf("flag provided but not defined: -%s", name) + } + return false, name, ErrRetry + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. 
+ if !hasValue && len(fs.args) > 0 { + // value is the next arg + hasValue = true + value, fs.args = fs.args[0], fs.args[1:] + } + if !hasValue { + return false, "", fs.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + for i, n := range flag.Names { + if n == fmt.Sprintf("#%s", name) { + replacement := "" + for j := i; j < len(flag.Names); j++ { + if flag.Names[j][0] != '#' { + replacement = flag.Names[j] + break + } + } + if replacement != "" { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) + } else { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + } + } + } + return true, "", nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (fs *FlagSet) Parse(arguments []string) error { + fs.parsed = true + fs.args = arguments + for { + seen, name, err := fs.parseOne() + if seen { + continue + } + if err == nil { + break + } + if err == ErrRetry { + if len(name) > 1 { + err = nil + for _, letter := range strings.Split(name, "") { + fs.args = append([]string{"-" + letter}, fs.args...) 
+ seen2, _, err2 := fs.parseOne() + if seen2 { + continue + } + if err2 != nil { + err = fs.failf("flag provided but not defined: -%s", name) + break + } + } + if err == nil { + continue + } + } else { + err = fs.failf("flag provided but not defined: -%s", name) + } + } + switch fs.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// ParseFlags is a utility function that adds a help flag if withHelp is true, +// calls fs.Parse(args) and prints a relevant error message if there are +// incorrect number of arguments. It returns error only if error handling is +// set to ContinueOnError and parsing fails. If error handling is set to +// ExitOnError, it's safe to ignore the return value. +func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { + var help *bool + if withHelp { + help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") + } + if err := fs.Parse(args); err != nil { + return err + } + if help != nil && *help { + fs.SetOutput(os.Stdout) + fs.Usage() + os.Exit(0) + } + if str := fs.CheckArgs(); str != "" { + fs.SetOutput(os.Stderr) + fs.ReportError(str, withHelp) + fs.ShortUsage() + os.Exit(1) + } + return nil +} + +// ReportError is a utility method that prints a user-friendly message +// containing the error that occured during parsing and a suggestion to get help +func (fs *FlagSet) ReportError(str string, withHelp bool) { + if withHelp { + if os.Args[0] == fs.Name() { + str += ".\nSee '" + os.Args[0] + " --help'" + } else { + str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" + } + } + fmt.Fprintf(fs.Out(), "docker: %s.\n", str) +} + +// Parsed reports whether fs.Parse has been called. +func (fs *FlagSet) Parsed() bool { + return fs.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. 
+func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { + fs.name = name + fs.errorHandling = errorHandling +} + +type mergeVal struct { + Value + key string + fset *FlagSet +} + +func (v mergeVal) Set(s string) error { + return v.fset.Set(v.key, s) +} + +func (v mergeVal) IsBoolFlag() bool { + if b, ok := v.Value.(boolFlag); ok { + return b.IsBoolFlag() + } + return false +} + +// Merge is an helper function that merges n FlagSets into a single dest FlagSet +// In case of name collision between the flagsets it will apply +// the destination FlagSet's errorHandling behaviour. 
+func Merge(dest *FlagSet, flagsets ...*FlagSet) error { + for _, fset := range flagsets { + for k, f := range fset.formal { + if _, ok := dest.formal[k]; ok { + var err error + if fset.name == "" { + err = fmt.Errorf("flag redefined: %s", k) + } else { + err = fmt.Errorf("%s flag redefined: %s", fset.name, k) + } + fmt.Fprintln(fset.Out(), err.Error()) + // Happens only if flags are declared with identical names + switch dest.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + newF := *f + newF.Value = mergeVal{f.Value, k, fset} + dest.formal[k] = &newF + } + } + return nil +} + +// IsEmpty reports if the FlagSet is actually empty. +func (fs *FlagSet) IsEmpty() bool { + return len(fs.actual) == 0 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go new file mode 100644 index 000000000..85f32c8aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go @@ -0,0 +1,516 @@ +// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mflag + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "testing" + "time" +) + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. 
+func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, false, "bool value") + Int([]string{"test_int"}, 0, "int value") + Int64([]string{"test_int64"}, 0, "int64 value") + Uint([]string{"test_uint"}, 0, "uint value") + Uint64([]string{"test_uint64"}, 0, "uint64 value") + String([]string{"test_string"}, "0", "string value") + Float64([]string{"test_float64"}, 0, "float64 value") + Duration([]string{"test_duration"}, 0, "time.Duration value") + + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + m[name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", name) + } + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { + for _, name := range f.Names { + flagNames = append(flagNames, name) + } + }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, true, "bool value") + Int([]string{"test_int"}, 1, "int value") + Int64([]string{"test_int64"}, 2, "int64 value") + Uint([]string{"test_uint"}, 3, "uint value") + Uint64([]string{"test_uint64"}, 4, "uint64 value") + String([]string{"test_string"}, "5", "string value") + Float64([]string{"test_float64"}, 6, "float64 value") + Duration([]string{"test_duration"}, 7, "time.Duration value") + + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) + } + } + } + } + VisitAll(visitor) +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool([]string{"bool"}, false, "bool value") + bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + f.Bool([]string{"bool3"}, false, "bool3 value") + bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") + intFlag := f.Int([]string{"-int"}, 0, "int value") + int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") + uintFlag := f.Uint([]string{"uint"}, 0, "uint value") + uint64Flag := 
f.Uint64([]string{"-uint64"}, 0, "uint64 value") + stringFlag := f.String([]string{"string"}, "0", "string value") + f.String([]string{"string2"}, "0", "string2 value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") + float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") + durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "-bool4=false", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if !f.IsSet("bool2") { + t.Error("bool2 should be marked as set") + } + if f.IsSet("bool3") { + t.Error("bool3 should not be marked as set") + } + if !f.IsSet("bool4") { + t.Error("bool4 should be marked as set") + } + if *bool4Flag != false { + t.Error("bool4 flag should be false, is ", *bool4Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if *uintFlag != 24 { + t.Error("uint 
flag should be 24, is ", *uintFlag) + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if !f.IsSet("string") { + t.Error("string flag should be marked as set") + } + if f.IsSet("string2") { + t.Error("string2 flag should not be marked as set") + } + if *singleQuoteFlag != "single" { + t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) + } + if *doubleQuoteFlag != "double" { + t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) + } + if *mixedQuoteFlag != `'mixed"` { + t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) + } + if *mixed2QuoteFlag != `"mixed2'` { + t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) + } + if *nestedQuoteFlag != "'single nested'" { + t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) + } + if *nested2QuoteFlag != `"double nested"` { + t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testPanic(f *FlagSet, t *testing.T) { + f.Int([]string{"-int"}, 0, "int value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + args := []string{ + "-int", "21", + } + f.Parse(args) +} + +func TestParsePanic(t *testing.T) { + ResetForTesting(func() {}) + testPanic(CommandLine, t) +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(CommandLine, t) +} + +func TestFlagSetParse(t *testing.T) 
{ + testParse(NewFlagSet("test", ContinueOnError), t) +} + +// Declare a user-defined flag type. +type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, []string{"v"}, "usage") + if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +// Declare a user-defined boolean flag type. +type boolFlagVar struct { + count int +} + +func (b *boolFlagVar) String() string { + return fmt.Sprintf("%d", b.count) +} + +func (b *boolFlagVar) Set(value string) error { + if value == "true" { + b.count++ + } + return nil +} + +func (b *boolFlagVar) IsBoolFlag() bool { + return b.count < 4 +} + +func TestUserDefinedBool(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var b boolFlagVar + var err error + flags.Var(&b, []string{"b"}, "usage") + if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { + if b.count < 4 { + t.Error(err) + } + } + + if b.count != 4 { + t.Errorf("want: %d; got: %d", 4, b.count) + } + + if err == nil { + t.Error("expected error; got none") + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. 
+func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} + before := Bool([]string{"before"}, false, "") + if err := CommandLine.Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool([]string{"after"}, false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"-flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by -flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"-help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, []string{"help"}, false, "help flag") + helpCalled = false + err = fs.Parse([]string{"-help"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +// Test the flag count functions. 
+func TestFlagCounts(t *testing.T) { + fs := NewFlagSet("help test", ContinueOnError) + var flag bool + fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") + fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") + + if fs.FlagCount() != 6 { + t.Fatal("FlagCount wrong. ", fs.FlagCount()) + } + if fs.FlagCountUndeprecated() != 4 { + t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) + } + if fs.NFlag() != 0 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } + err := fs.Parse([]string{"-fd", "-g", "-flag4"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if fs.NFlag() != 4 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } +} + +// Show up bug in sortFlags +func TestSortFlags(t *testing.T) { + fs := NewFlagSet("help TestSortFlags", ContinueOnError) + + var err error + + var b bool + fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") + + err = fs.Parse([]string{"--banana=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + count := 0 + + fs.VisitAll(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("VisitAll should not return a nil flag") + } + }) + flagcount := fs.FlagCount() + if flagcount != count { + t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) + } + // Make sure its idempotent + if flagcount != fs.FlagCount() { + t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) + } + + count = 0 + fs.Visit(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("Visit should not return a nil flag") + } + }) + nflag := fs.NFlag() + if nflag != count { + t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, 
count) + } + if nflag != fs.NFlag() { + t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 000000000..e326a1191 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,187 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "net/url" + "path" + "runtime" + "strconv" + "strings" +) + +// ParseHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr +// FIXME: Change this not to receive default value as parameter +func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if runtime.GOOS != "windows" { + addr = fmt.Sprintf("unix://%s", defaultUnixAddr) + } else { + // Note - defaultTCPAddr already includes tcp:// prefix + addr = defaultTCPAddr + } + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// ParseUnixAddr parses and validates that the specified address is a valid UNIX +// socket address. 
It returns a formatted UNIX socket address, either using the +// address parsed from addr, or the contents of defaultAddr if addr is a blank +// string. +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from addr, or the contents of defaultAddr if addr is a blank string. +func ParseTCPAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + hostParts := strings.Split(u.Host, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + host := hostParts[0] + if host == "" { + host = defaultAddr + } + + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil +} + +// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest +// The tag can be confusing because of a port in a repository name. 
+// Ex: localhost.localdomain:5000/samalba/hipache:latest +// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb +func ParseRepositoryTag(repos string) (string, string) { + n := strings.Index(repos, "@") + if n >= 0 { + parts := strings.Split(repos, "@") + return parts[0], parts[1] + } + n = strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := 
strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get an HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 000000000..903c66afb --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,210 @@ +package parsers + +import ( + "strings" + "testing" +) + +func TestParseHost(t *testing.T) { + var ( + defaultHTTPHost = "127.0.0.1" + defaultUnix = "/var/run/docker.sock" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp://": "Invalid proto, expected tcp: ", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind 
address format: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "": "unix:///var/run/docker.sock", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and 
digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } +} + +func TestParsePortMapping(t *testing.T) { + if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil { + t.Fatalf("Expected an error, got %v", err) + } + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") 
+ if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // 
short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000..515fb4d05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. 
+ BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. 
+type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go new file mode 100644 index 000000000..78689800b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go @@ -0,0 +1,162 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := 
bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := 
new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? + writer.Flush() + bw.Flush() + if len(buf.Bytes()) != 6 { + t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) + } + // Reset the buffer + buf.Reset() + BufioWriter32KPool.Put(writer) + // Try to write something + written, err = writer.Write([]byte("barfoo")) + if err != nil { + t.Fatal(err) + } + // If we now try to flush it, it should panic (the writer is nil) + // recover it + defer func() { + if r := recover(); r == nil { + t.Fatal("Trying to flush the writter should have 'paniced', did not.") + } + }() + writer.Flush() +} + +type simpleWriterCloser struct { + io.Writer + closed bool +} + +func (r *simpleWriterCloser) Close() error { + r.closed = true + return nil +} + +func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + sw := &simpleWriterCloser{ + Writer: new(bytes.Buffer), + closed: false, + } + bw.Flush() + writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + writer.Close() + if !sw.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 000000000..dd52b9082 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 000000000..63b3df79f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,168 @@ +package stdcopy + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" +) + +const ( + StdWriterPrefixLen = 8 + StdWriterFdIndex = 0 + StdWriterSizeIndex = 4 +) + +type StdType [StdWriterPrefixLen]byte + +var ( + Stdin StdType = StdType{0: 0} + Stdout StdType = StdType{0: 1} + Stderr StdType = StdType{0: 2} +) + +type StdWriter struct { + io.Writer + prefix StdType + sizeBuf []byte +} + +func (w *StdWriter) Write(buf []byte) (n int, err error) { + var n1, n2 int + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) + n1, err = w.Writer.Write(w.prefix[:]) + if err != nil { + n = n1 - StdWriterPrefixLen + } else { + n2, err = w.Writer.Write(buf) + n = n1 + n2 - StdWriterPrefixLen + } + if n < 0 { + n = 0 + } + return +} + +// NewStdWriter instantiates a new Writer. 
+// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var ErrInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < StdWriterPrefixLen { + logrus.Debugf("Corrupted prefix: %v", buf[:nr]) + return written, nil + } + break + } + if er != nil { + logrus.Debugf("Error reading header: %s", er) + return 0, er + } + } + + // Check the first byte to know where to write + switch buf[StdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) + return 0, ErrInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + logrus.Debugf("framesize: %d", frameSize) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+StdWriterPrefixLen > bufLen { + logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) 
+ bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+StdWriterPrefixLen { + logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) + return written, nil + } + break + } + if er != nil { + logrus.Debugf("Error reading frame: %s", er) + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) + if ew != nil { + logrus.Debugf("Error writing frame: %s", ew) + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+StdWriterPrefixLen:]) + // Move the index + nr -= frameSize + StdWriterPrefixLen + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 000000000..a9fd73a49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,85 @@ +package stdcopy + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" +) + +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := StdWriter{ + Writer: nil, + prefix: Stdout, + sizeBuf: make([]byte, 4), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + 
t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err := writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have fail when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n) + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 000000000..63045186f --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,9 @@ +package system + +import ( + "errors" +) + +var ( + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 000000000..23f7c618b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" +) + +const ( + EVENT_ALL_ACCESS = 0x1F0003 + EVENT_MODIFY_STATUS = 0x0002 +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 = 0 + if manualReset { + _p1 = 1 + } + var _p2 uint32 = 0 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 = 0 + if 
inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 000000000..e1f70e8da --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "os" +) + +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 000000000..90b500608 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,64 @@ +// +build windows + +package system + 
+import ( + "os" + "regexp" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 000000000..d0e43b370 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Lstat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go new file mode 100644 index 000000000..6bac492eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go @@ -0,0 +1,28 @@ +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 000000000..eee1be26e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package system + +import ( + "os" +) + +// Some explanation for my own sanity, and hopefully maintainers in the +// future. +// +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. 
+// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. + +func Lstat(path string) (*Stat_t, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &Stat_t{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 000000000..3b6e947e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. 
+ SwapFree int64 +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 000000000..41f2bab60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,71 @@ +package system + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" +) + +var ( + ErrMalformed = errors.New("malformed file") +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given a io.Reader to the file. +// +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go new file mode 100644 index 000000000..87830ccb2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go @@ -0,0 +1,38 @@ +package system + +import ( + "strings" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 000000000..604d33875 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package system + +func ReadMemInfo() (*MemInfo, 
error) { + return nil, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 000000000..d46642598 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 000000000..26617eb08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,20 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 000000000..1811542ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 000000000..e2ecfe52f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go @@ -0,0 +1,46 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Stat_t type contains status of a file. 
It contains metadata +// like permission, owner, group, size, etc about a file +type Stat_t struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +func (s Stat_t) Mode() uint32 { + return s.mode +} + +func (s Stat_t) Uid() uint32 { + return s.uid +} + +func (s Stat_t) Gid() uint32 { + return s.gid +} + +func (s Stat_t) Rdev() uint64 { + return s.rdev +} + +func (s Stat_t) Size() int64 { + return s.size +} + +func (s Stat_t) Mtim() syscall.Timespec { + return s.mtim +} + +func (s Stat_t) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 000000000..4b2198b3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return &Stat_t{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 000000000..80262d951 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return &Stat_t{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.Stat_t from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go new file mode 100644 index 000000000..453412920 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 000000000..5251ae212 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.Stat_t type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return 
&Stat_t{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 000000000..b1fd39e83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,36 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +type Stat_t struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +func (s Stat_t) Name() string { + return s.name +} + +func (s Stat_t) Size() int64 { + return s.size +} + +func (s Stat_t) Mode() os.FileMode { + return s.mode +} + +func (s Stat_t) ModTime() time.Time { + return s.modTime +} + +func (s Stat_t) IsDir() bool { + return s.isDir +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 000000000..fddbecd39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 000000000..3be563f89 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go new file mode 100644 index 000000000..4c6002fe8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go @@ -0,0 +1,11 @@ +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 000000000..ceaa044c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 000000000..8f9029827 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,28 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + AT_FDCWD := -100 + AT_SYMLINK_NOFOLLOW := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go new file mode 100644 index 000000000..350cce1ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go @@ -0,0 +1,66 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + 
t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{0, 0}, {0, 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 000000000..adf2734f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux,!freebsd,!darwin + +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 000000000..00edb201b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,59 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Returns a nil slice and nil error if the xattr is not set +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 000000000..0060c167d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package system + +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go new file mode 100644 index 000000000..8fb0d804d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go @@ -0,0 +1,111 @@ +// Package ulimit provides structure and helper function to parse and represent +// resource limits (Rlimit and Ulimit, its human friendly version). +package ulimit + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. 
+ // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// Parse parses and returns a Ulimit from the specified string. +func Parse(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + limitVals := strings.SplitN(parts[1], ":", 2) + if len(limitVals) > 2 { + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + soft, err := strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + + hard := soft // in case no hard was set + if len(limitVals) == 2 { + hard, err = strconv.ParseInt(limitVals[1], 10, 64) + } + if soft > hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go new file mode 100644 index 000000000..1e8c881f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go @@ -0,0 +1,55 @@ +package ulimit + +import "testing" + +func TestParseValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + +func TestParseInvalidLimitType(t *testing.T) { + if _, err := Parse("notarealtype=1024:1024"); err == nil { + t.Fatalf("expected error on invalid ulimit type") + } +} + +func TestParseBadFormat(t *testing.T) { + if _, err := Parse("nofile:1024:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := Parse("nofile"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := Parse("nofile="); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := Parse("nofile=:"); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := Parse("nofile=:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } +} + +func TestParseHardLessThanSoft(t *testing.T) { + if _, err := Parse("nofile:1024:1"); err == nil { + t.Fatal("expected error on hard limit less than soft limit") + } +} + +func TestParseInvalidValueType(t *testing.T) { + if _, err := Parse("nofile:asdf"); err == nil { + t.Fatal("expected 
error on bad value type") + } +} + +func TestStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go new file mode 100644 index 000000000..c219a8a96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go @@ -0,0 +1,33 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go new file mode 100644 index 
000000000..fcfb6b7bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go @@ -0,0 +1,46 @@ +package units + +import ( + "testing" + "time" +) + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + 
assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3 years", HumanDuration(3*year+2*month)) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go new file mode 100644 index 000000000..2fde3b412 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB"). 
+func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go new file mode 100644 index 000000000..67c3b81e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go @@ -0,0 +1,108 @@ +package units + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", 
BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, "32.3") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32.3Kb") + assertError(t, FromHumanSize, "32 mb") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + 
assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, "32.3") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32.3Kb") + assertError(t, RAMInBytes, "32 mb") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +// func that maps to the parse function signatures as testing abstraction +type parseFn func(string) (int64, error) + +// Define 'String()' for pretty-print +func (fn parseFn) String() string { + fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() + return fnName[strings.LastIndex(fnName, ".")+1:] +} + +func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { + res, err := fn(arg) + if err != nil || res != expected { + t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) + } +} + +func assertError(t *testing.T, fn parseFn, arg string) { + res, err := fn(arg) + if err == nil && res != -1 { + t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go new file mode 100644 index 
000000000..19c9d77ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go @@ -0,0 +1,61 @@ +package volume + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName string = "local" + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(string) (Volume, error) + // Remove deletes the volume. + Remove(Volume) error +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount() (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount() error +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "rw,Z": true, + "rw,z": true, + "z,rw": true, + "Z,rw": true, + "Z": true, + "z": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, + "ro,Z": true, + "ro,z": true, + "z,ro": true, + "Z,ro": true, +} + +// ValidateMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode and if it's read-write or not. +func ValidateMountMode(mode string) (bool, bool) { + return roModes[mode] || rwModes[mode], rwModes[mode] +} + +// ReadWrite tells you if a mode string is a valid read-only mode or not. 
+func ReadWrite(mode string) bool { + return rwModes[mode] +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS new file mode 100644 index 000000000..edbe20066 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go new file mode 100644 index 000000000..6f8a982ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. 
+func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. 
If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go new file mode 100644 index 000000000..758b734c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go new file mode 100644 index 000000000..721794887 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, 
error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go new file mode 100644 index 000000000..13226dbfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go @@ -0,0 +1,407 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s 
= bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( + userArg, groupArg string + name string + ) + + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. 
+ user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + return u.Uid == user.Uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // we asked for a user but didn't find them... let's check to see if we wanted a numeric user + user.Uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + + // Check if user is a member. 
+ for _, u := range g.List { + if u == name { + return true + } + } + + return false + }) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + user.Gid = groups[0].Gid + } else { + // we asked for a group but didn't find id... let's check to see if we wanted a numeric group + user.Gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + // If implicit group format, fill supplementary gids. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroupsPath looks up a list of groups by name or group id +// against the group file. If a group name cannot be found, an error will be +// returned. If a group id cannot be found, it will be returned as-is. 
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + groupReader, err := os.Open(groupPath) + if err != nil { + return nil, fmt.Errorf("Failed to open group file: %v", err) + } + defer groupReader.Close() + + groups, err := ParseGroupFilter(groupReader, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. 
+ if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go new file mode 100644 index 000000000..ffb0760e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go @@ -0,0 +1,443 @@ +package user + +import ( + "fmt" + "io" + "io/ioutil" + "reflect" + "sort" + "strconv" + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a != "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', 
%v)", a, b, d) + } +} + +func TestUserParsePasswd(t *testing.T) { + users, err := ParsePasswdFilter(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := ParseGroupFilter(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + 
expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + }, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. + "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. 
+ "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestGetAdditionalGroupsPath(t *testing.T) 
{ + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +adm:x:4343:root,adm-duplicate +this is just some garbage data +` + tests := []struct { + groups []string + expected []int + hasError bool + }{ + { + // empty group + groups: []string{}, + expected: []int{}, + }, + { + // single group + groups: []string{"adm"}, + expected: []int{43}, + }, + { + // multiple groups + groups: []string{"adm", "grp"}, + expected: []int{43, 1234}, + }, + { + // invalid group + groups: []string{"adm", "grp", "not-exist"}, + expected: nil, + hasError: true, + }, + { + // group with numeric id + groups: []string{"43"}, + expected: []int{43}, + }, + { + // group with unknown numeric id + groups: []string{"adm", "10001"}, + expected: []int{43, 10001}, + }, + { + // groups specified twice with numeric and name + groups: []string{"adm", "43"}, + expected: []int{43}, + }, + { + // groups with too small id + groups: []string{"-1"}, + expected: nil, + hasError: true, + }, + { + // groups with too large id + groups: []string{strconv.Itoa(1 << 31)}, + expected: nil, + hasError: true, + }, + } + + for _, test := range tests { + tmpFile, err := ioutil.TempFile("", "get-additional-groups-path") + if err != nil { + t.Error(err) + } + fmt.Fprint(tmpFile, groupContent) + tmpFile.Close() + + gids, err := GetAdditionalGroupsPath(test.groups, tmpFile.Name()) + if test.hasError && err == nil { + t.Errorf("Parse(%#v) expects error but has none", test) + continue + } + if !test.hasError && err != nil { + t.Errorf("Parse(%#v) has error %v", test, err) + continue + } + sort.Sort(sort.IntSlice(gids)) + if !reflect.DeepEqual(gids, test.expected) { + t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE new file mode 100644 index 
000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md new file mode 100644 index 000000000..c60a31b05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go new file mode 100644 index 000000000..81cb128b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. 
+func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. 
+// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go new file mode 100644 index 000000000..9814c501e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(value, nil) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- 
struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go new file mode 100644 index 000000000..73c740031 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. 
+ +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. There are several +others common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. 
Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md new file mode 100644 index 000000000..9a046ff97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md @@ -0,0 +1,235 @@ +mux +=== +[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. 
This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. 
To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is `www.example.com`. Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.example.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +`www.example.com`, because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. 
When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. 
For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +## Full Example + +Here's a complete, runnable example of a small mux based server: + +```go +package main + +import ( + "net/http" + + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + http.ListenAndServe(":8000", r) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go new file mode 100644 index 000000000..c5f97b2b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..49798cb5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go @@ -0,0 +1,206 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. 
+ * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. 
To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.example.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.example.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.example.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. 
When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. 
For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..b32e1a051 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go @@ -0,0 +1,469 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "path" + "regexp" + + "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. 
+// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. 
+func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) 
+} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVars registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. 
+var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. 
+func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) 
+ if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+ valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go new file mode 100644 index 000000000..74cb98b83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,1334 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" +) + +func (r *Route) GoString() string { + matchers := make([]string, len(r.matchers)) + for i, m := range r.matchers { + matchers[i] = fmt.Sprintf("%#v", m) + } + return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", ")) +} + +func (r *routeRegexp) GoString() string { + return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR) +} + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost a new 
request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", 
+ route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v1:[a-z]{2}(b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with hyphenated name and pattern, match", + route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, 
+ { + title: "Host route with hyphenated name and pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v-1:[a-z]{2}(b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple hyphenated names and patterns, match", + route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: map[string]string{"category": "a"}, + host: "", + path: "/a", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: "/b/c/product_name/1", + shouldMatch: true, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + 
tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: 
map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|(b/c)}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with hyphenated name and pattern, match", + route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns, match", + route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns with pipe, match", + route: new(Route).Path("/{product-category:a|(b/c)}/{product-name}/{product-id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + 
title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) 
{ + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", 
"v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).Headers("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).HeadersRegexp("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests := []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + 
path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple 
patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?bar=2&foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v1:[0-9]{1}(a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional 
variable in query string, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with hyphenated name, match", + route: new(Route).Queries("foo", "{v-1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v-1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple hyphenated names, match", + route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v-1": "bar", "v-2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenate name and pattern, match", + route: new(Route).Queries("foo", "{v-1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v-1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v-1:[0-9]{1}(a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v-1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value and no parameter in request, should not match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value and empty parameter in request, should match", + route: 
new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with overlapping value, should not match", + route: new(Route).Queries("foo", "bar"), + request: newRequest("GET", "http://localhost?foo=barfoo"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with no parameter in request, should not match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty parameter in request, should match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{"foo": ""}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad submatch", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := 
range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { + return true + } + return false + } + + tests := []routeTest{ + { + title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestBuildVarsFunc(t *testing.T) { + tests := []routeTest{ + { + title: "BuildVarsFunc set on route", + route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "3" + vars["v2"] = "a" + return vars + }), + request: newRequest("GET", "http://localhost/111/2"), + path: "/111/3a", + shouldMatch: true, + }, + { + title: "BuildVarsFunc set on route and parent route", + route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "2" + return vars + }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v2"] = "b" + return vars + }), + request: newRequest("GET", "http://localhost/1/a"), + path: "/2/b", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: 
true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + r3.NewRoute().Name("i") + + if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: 
r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestWalkSingleDepth(t *testing.T) { + r0 := NewRouter() + r1 := NewRouter() + r2 := NewRouter() + + r0.Path("/g") + r0.Path("/o") + r0.Path("/d").Handler(r1) + r0.Path("/r").Handler(r2) + r0.Path("/a") + + r1.Path("/z") + r1.Path("/i") + r1.Path("/l") + r1.Path("/l") + + r2.Path("/i") + r2.Path("/l") + r2.Path("/l") + + paths := []string{"g", "o", "r", "i", "l", "l", "a"} + depths := []int{0, 0, 0, 1, 1, 1, 0} + i := 0 + err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { + matcher := route.matchers[0].(*routeRegexp) + if matcher.template == "/d" { + return SkipRouter + } + if len(ancestors) != depths[i] { + t.Errorf(`Expected depth of %d at i = %d; got "%s"`, depths[i], i, len(ancestors)) + } + if matcher.template != "/"+paths[i] { + t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) + } + i++ + return nil + }) + if err != nil { + 
panic(err) + } + if i != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), i) + } +} + +func TestWalkNested(t *testing.T) { + router := NewRouter() + + g := router.Path("/g").Subrouter() + o := g.PathPrefix("/o").Subrouter() + r := o.PathPrefix("/r").Subrouter() + i := r.PathPrefix("/i").Subrouter() + l1 := i.PathPrefix("/l").Subrouter() + l2 := l1.PathPrefix("/l").Subrouter() + l2.Path("/a") + + paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} + idx := 0 + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + path := paths[idx] + tpl := route.regexp.path.template + if tpl != path { + t.Errorf(`Expected %s got %s`, path, tpl) + } + idx++ + return nil + }) + if err != nil { + panic(err) + } + if idx != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), idx) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, 
match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) + if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho 
TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if "http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected := "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 
map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go new file mode 100644 index 000000000..755db483e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. 
+func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). + Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. 
+ scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. :) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. 
+ reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. + reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + 
method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: 
[]string{}, + url: "http://foo.domain.com/articles", + }, + { + route: new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: "http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := 
http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := v.route.URL(v.vars...) 
+ url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.example.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.example.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.example.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. 
+func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..06728dd54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go @@ -0,0 +1,317 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]*" + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. 
+ if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + varIdx := i / 2 + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[varIdx] = name + varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). 
+ varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getUrlQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getUrlQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getUrlQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
+func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + subexpNames := v.host.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.host.varsN[varName]] = hostVars[i+1] + varName++ + } + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + subexpNames := v.path.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.path.varsN[varName]] = pathVars[i+1] + varName++ + } + } + // Check if we should redirect. 
+ if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) + if queryVars != nil { + subexpNames := q.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[q.varsN[varName]] = queryVars[i+1] + varName++ + } + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..890130460 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go @@ -0,0 +1,603 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. 
+ regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. 
+func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. 
+// Alternatively, you can provide a regular expression and match the header as follows: +// +// r.Headers("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will the same as the previous example, with the addition of matching +// application/text as well. +// +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// Regular expressions can be used with headers as well. +// It accepts a sequence of key/value pairs, where the value has regex support. For example +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. 
+// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. 
+// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// It the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. 
+func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. 
For example: +// +// r := mux.NewRouter() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) 
+ if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) 
+ if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. 
+ r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS new file mode 100644 index 000000000..edbe20066 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go new file mode 100644 index 000000000..6f8a982ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. 
If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. 
+func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go new file mode 100644 index 000000000..758b734c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. 
+const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go new file mode 100644 index 000000000..721794887 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go new file mode 100644 index 000000000..964e31bfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go @@ -0,0 +1,413 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", 
minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s 
= bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( + userArg, groupArg string + name string + ) + + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. 
+ user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + return u.Uid == user.Uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // we asked for a user but didn't find them... let's check to see if we wanted a numeric user + user.Uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + + // Check if user is a member. 
+ for _, u := range g.List { + if u == name { + return true + } + } + + return false + }) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + user.Gid = groups[0].Gid + } else { + // we asked for a group but didn't find id... let's check to see if we wanted a numeric group + user.Gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + // If implicit group format, fill supplementary gids. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id against +// against the given /etc/group formatted data. If a group name cannot be found, +// an error will be returned. If a group id cannot be found, it will be returned +// as-is. 
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + groups, err := ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. + if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// Wrapper around GetAdditionalGroups that opens the groupPath given and gives +// it as an argument to GetAdditionalGroups. 
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + group, err := os.Open(groupPath) + if err != nil { + return nil, fmt.Errorf("Failed to open group file: %v", err) + } + defer group.Close() + return GetAdditionalGroups(additionalGroups, group) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go new file mode 100644 index 000000000..0e37ac3dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go @@ -0,0 +1,436 @@ +package user + +import ( + "io" + "reflect" + "sort" + "strconv" + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a 
!= "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) + } +} + +func TestUserParsePasswd(t *testing.T) { + users, err := ParsePasswdFilter(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := ParseGroupFilter(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + 
expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + }, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. 
+ "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. + "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + 
t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestGetAdditionalGroups(t *testing.T) { + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +adm:x:4343:root,adm-duplicate +this is just some garbage data +` + tests := []struct { + groups []string + expected []int + hasError bool + }{ + { + // empty group + groups: []string{}, + expected: []int{}, + }, + { + // single group + groups: []string{"adm"}, + expected: []int{43}, + }, + { + // multiple groups + groups: []string{"adm", "grp"}, + expected: []int{43, 1234}, + }, + { + // invalid group + groups: []string{"adm", "grp", "not-exist"}, + expected: nil, + hasError: true, + }, + { + // group with numeric id + groups: []string{"43"}, + expected: []int{43}, + }, + { + // group with unknown numeric id + groups: []string{"adm", "10001"}, + expected: []int{43, 10001}, + }, + { + // groups specified twice with numeric and name + groups: []string{"adm", "43"}, + expected: []int{43}, + }, + { + // groups with too small id + groups: []string{"-1"}, + expected: nil, + hasError: true, + }, + { + // groups with too large id + groups: []string{strconv.Itoa(1 << 31)}, + expected: nil, + hasError: true, + }, + } + + for _, test := range tests { + group := strings.NewReader(groupContent) + + gids, err := GetAdditionalGroups(test.groups, group) + if test.hasError && err == nil { + t.Errorf("Parse(%#v) expects error but has none", test) + continue + } + if !test.hasError && err != nil { + t.Errorf("Parse(%#v) has error %v", test, err) + continue + } + sort.Sort(sort.IntSlice(gids)) + if !reflect.DeepEqual(gids, test.expected) { + t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go new file mode 100644 index 000000000..4bceb0d3d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go @@ -0,0 +1,537 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" +) + +// APIImages represent an image returned in the ListImages call. +type APIImages struct { + ID string `json:"Id" yaml:"Id"` + RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` + ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"` + RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Image is the type representing a docker image and its various properties +type Image struct { + ID string `json:"Id" yaml:"Id"` + Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"` + Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty"` + ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty" yaml:"Author,omitempty"` + Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" 
yaml:"VirtualSize,omitempty"` +} + +// ImageHistory represent a layer in an image's history returned by the +// ImageHistory call. +type ImageHistory struct { + ID string `json:"Id" yaml:"Id"` + Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// ListImagesOptions specify parameters to the ListImages function. +// +// See http://goo.gl/HRVN1Z for more details. +type ListImagesOptions struct { + All bool + Filters map[string][]string + Digests bool +} + +var ( + // ErrNoSuchImage is the error returned when the image does not exist. + ErrNoSuchImage = errors.New("no such image") + + // ErrMissingRepo is the error returned when the remote repository is + // missing. + ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") + + // ErrMissingOutputStream is the error returned when no output stream + // is provided to some calls, like BuildImage. 
+ ErrMissingOutputStream = errors.New("missing output stream") + + // ErrMultipleContexts is the error returned when both a ContextDir and + // InputStream are provided in BuildImageOptions + ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") + + // ErrMustSpecifyNames is the error rreturned when the Names field on + // ExportImagesOptions is nil or empty + ErrMustSpecifyNames = errors.New("must specify at least one name to export") +) + +// ListImages returns the list of available images in the server. +// +// See http://goo.gl/HRVN1Z for more details. +func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { + path := "/images/json?" + queryString(opts) + body, _, err := c.do("GET", path, doOptions{}) + if err != nil { + return nil, err + } + var images []APIImages + err = json.Unmarshal(body, &images) + if err != nil { + return nil, err + } + return images, nil +} + +// ImageHistory returns the history of the image by its name or ID. +// +// See http://goo.gl/2oJmNs for more details. +func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { + body, status, err := c.do("GET", "/images/"+name+"/history", doOptions{}) + if status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if err != nil { + return nil, err + } + var history []ImageHistory + err = json.Unmarshal(body, &history) + if err != nil { + return nil, err + } + return history, nil +} + +// RemoveImage removes an image by its name or ID. +// +// See http://goo.gl/znj0wM for more details. +func (c *Client) RemoveImage(name string) error { + _, status, err := c.do("DELETE", "/images/"+name, doOptions{}) + if status == http.StatusNotFound { + return ErrNoSuchImage + } + return err +} + +// RemoveImageOptions present the set of options available for removing an image +// from a registry. +// +// See http://goo.gl/6V48bF for more details. 
+type RemoveImageOptions struct { + Force bool `qs:"force"` + NoPrune bool `qs:"noprune"` +} + +// RemoveImageExtended removes an image by its name or ID. +// Extra params can be passed, see RemoveImageOptions +// +// See http://goo.gl/znj0wM for more details. +func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { + uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) + _, status, err := c.do("DELETE", uri, doOptions{}) + if status == http.StatusNotFound { + return ErrNoSuchImage + } + return err +} + +// InspectImage returns an image by its name or ID. +// +// See http://goo.gl/Q112NY for more details. +func (c *Client) InspectImage(name string) (*Image, error) { + body, status, err := c.do("GET", "/images/"+name+"/json", doOptions{}) + if status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if err != nil { + return nil, err + } + + var image Image + + // if the caller elected to skip checking the server's version, assume it's the latest + if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { + err = json.Unmarshal(body, &image) + if err != nil { + return nil, err + } + } else { + var imagePre012 ImagePre012 + err = json.Unmarshal(body, &imagePre012) + if err != nil { + return nil, err + } + + image.ID = imagePre012.ID + image.Parent = imagePre012.Parent + image.Comment = imagePre012.Comment + image.Created = imagePre012.Created + image.Container = imagePre012.Container + image.ContainerConfig = imagePre012.ContainerConfig + image.DockerVersion = imagePre012.DockerVersion + image.Author = imagePre012.Author + image.Config = imagePre012.Config + image.Architecture = imagePre012.Architecture + image.Size = imagePre012.Size + } + + return &image, nil +} + +// PushImageOptions represents options to use in the PushImage method. +// +// See http://goo.gl/pN8A3P for more details. 
+type PushImageOptions struct { + // Name of the image + Name string + + // Tag of the image + Tag string + + // Registry server to push the image + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` +} + +// PushImage pushes an image to a remote registry, logging progress to w. +// +// An empty instance of AuthConfiguration may be used for unauthenticated +// pushes. +// +// See http://goo.gl/pN8A3P for more details. +func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { + if opts.Name == "" { + return ErrNoSuchImage + } + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + name := opts.Name + opts.Name = "" + path := "/images/" + name + "/push?" + queryString(&opts) + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + stdout: opts.OutputStream, + }) +} + +// PullImageOptions present the set of options available for pulling an image +// from a registry. +// +// See http://goo.gl/ACyYNS for more details. +type PullImageOptions struct { + Repository string `qs:"fromImage"` + Registry string + Tag string + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` +} + +// PullImage pulls an image from a remote registry, logging progress to opts.OutputStream. +// +// See http://goo.gl/ACyYNS for more details. +func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream) +} + +func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error { + path := "/images/create?" 
+ qs + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + rawJSONStream: rawJSONStream, + headers: headers, + in: in, + stdout: w, + }) +} + +// LoadImageOptions represents the options for LoadImage Docker API Call +// +// See http://goo.gl/Y8NNCq for more details. +type LoadImageOptions struct { + InputStream io.Reader +} + +// LoadImage imports a tarball docker image +// +// See http://goo.gl/Y8NNCq for more details. +func (c *Client) LoadImage(opts LoadImageOptions) error { + return c.stream("POST", "/images/load", streamOptions{ + setRawTerminal: true, + in: opts.InputStream, + }) +} + +// ExportImageOptions represent the options for ExportImage Docker API call +// +// See http://goo.gl/mi6kvk for more details. +type ExportImageOptions struct { + Name string + OutputStream io.Writer +} + +// ExportImage exports an image (as a tar file) into the stream +// +// See http://goo.gl/mi6kvk for more details. +func (c *Client) ExportImage(opts ExportImageOptions) error { + return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + }) +} + +// ExportImagesOptions represent the options for ExportImages Docker API call +// +// See http://goo.gl/YeZzQK for more details. +type ExportImagesOptions struct { + Names []string + OutputStream io.Writer `qs:"-"` +} + +// ExportImages exports one or more images (as a tar file) into the stream +// +// See http://goo.gl/YeZzQK for more details. +func (c *Client) ExportImages(opts ExportImagesOptions) error { + if opts.Names == nil || len(opts.Names) == 0 { + return ErrMustSpecifyNames + } + return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + }) +} + +// ImportImageOptions present the set of informations available for importing +// an image from a source file or the stdin. +// +// See http://goo.gl/PhBKnS for more details. 
+type ImportImageOptions struct {
+	Repository string `qs:"repo"`
+	Source     string `qs:"fromSrc"`
+	Tag        string `qs:"tag"`
+
+	InputStream   io.Reader `qs:"-"`
+	OutputStream  io.Writer `qs:"-"`
+	RawJSONStream bool      `qs:"-"`
+}
+
+// ImportImage imports an image from a url, a file or stdin
+//
+// See http://goo.gl/PhBKnS for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+	if opts.Repository == "" {
+		return ErrNoSuchImage
+	}
+	if opts.Source != "-" {
+		opts.InputStream = nil
+	}
+	if opts.Source != "-" && !isURL(opts.Source) {
+		f, err := os.Open(opts.Source)
+		if err != nil {
+			return err
+		}
+		b, err := ioutil.ReadAll(f)
+		opts.InputStream = bytes.NewBuffer(b)
+		opts.Source = "-"
+	}
+	return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream)
+}
+
+// BuildImageOptions present the set of information available for building an
+// image from a tarfile with a Dockerfile in it.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
+type BuildImageOptions struct {
+	Name                string             `qs:"t"`
+	Dockerfile          string             `qs:"dockerfile"`
+	NoCache             bool               `qs:"nocache"`
+	SuppressOutput      bool               `qs:"q"`
+	Pull                bool               `qs:"pull"`
+	RmTmpContainer      bool               `qs:"rm"`
+	ForceRmTmpContainer bool               `qs:"forcerm"`
+	Memory              int64              `qs:"memory"`
+	Memswap             int64              `qs:"memswap"`
+	CPUShares           int64              `qs:"cpushares"`
+	CPUSetCPUs          string             `qs:"cpusetcpus"`
+	InputStream         io.Reader          `qs:"-"`
+	OutputStream        io.Writer          `qs:"-"`
+	RawJSONStream       bool               `qs:"-"`
+	Remote              string             `qs:"remote"`
+	Auth                AuthConfiguration  `qs:"-"` // for older docker X-Registry-Auth header
+	AuthConfigs         AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+	ContextDir          string             `qs:"-"`
+}
+
+// BuildImage builds an image from a tarball's url or a Dockerfile in the input
+// stream.
+//
+// See http://goo.gl/7nuGXa for more details.
+func (c *Client) BuildImage(opts BuildImageOptions) error { + if opts.OutputStream == nil { + return ErrMissingOutputStream + } + headers, err := headersWithAuth(opts.Auth, opts.AuthConfigs) + if err != nil { + return err + } + + if opts.Remote != "" && opts.Name == "" { + opts.Name = opts.Remote + } + if opts.InputStream != nil || opts.ContextDir != "" { + headers["Content-Type"] = "application/tar" + } else if opts.Remote == "" { + return ErrMissingRepo + } + if opts.ContextDir != "" { + if opts.InputStream != nil { + return ErrMultipleContexts + } + var err error + if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { + return err + } + } + + return c.stream("POST", fmt.Sprintf("/build?%s", queryString(&opts)), streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + in: opts.InputStream, + stdout: opts.OutputStream, + }) +} + +// TagImageOptions present the set of options to tag an image. +// +// See http://goo.gl/5g6qFy for more details. +type TagImageOptions struct { + Repo string + Tag string + Force bool +} + +// TagImage adds a tag to the image identified by the given name. +// +// See http://goo.gl/5g6qFy for more details. 
+func (c *Client) TagImage(name string, opts TagImageOptions) error { + if name == "" { + return ErrNoSuchImage + } + _, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s", + queryString(&opts)), doOptions{}) + + if status == http.StatusNotFound { + return ErrNoSuchImage + } + + return err +} + +func isURL(u string) bool { + p, err := url.Parse(u) + if err != nil { + return false + } + return p.Scheme == "http" || p.Scheme == "https" +} + +func headersWithAuth(auths ...interface{}) (map[string]string, error) { + var headers = make(map[string]string) + + for _, auth := range auths { + switch auth.(type) { + case AuthConfiguration: + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + case AuthConfigurations: + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + } + } + + return headers, nil +} + +// APIImageSearch reflect the result of a search on the dockerHub +// +// See http://goo.gl/xI5lLZ for more details. +type APIImageSearch struct { + Description string `json:"description,omitempty" yaml:"description,omitempty"` + IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"` + IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"` +} + +// SearchImages search the docker hub with a specific given term. +// +// See http://goo.gl/xI5lLZ for more details. 
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { + body, _, err := c.do("GET", "/images/search?term="+term, doOptions{}) + if err != nil { + return nil, err + } + var searchResult []APIImageSearch + err = json.Unmarshal(body, &searchResult) + if err != nil { + return nil, err + } + return searchResult, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go new file mode 100644 index 000000000..d6bce64cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go @@ -0,0 +1,967 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" +) + +func newTestClient(rt *FakeRoundTripper) Client { + endpoint := "http://localhost:4243" + u, _ := parseEndpoint("http://localhost:4243", false) + client := Client{ + HTTPClient: &http.Client{Transport: rt}, + endpoint: endpoint, + endpointURL: u, + SkipServerVersionCheck: true, + } + return client +} + +type stdoutMock struct { + *bytes.Buffer +} + +func (m stdoutMock) Close() error { + return nil +} + +type stdinMock struct { + *bytes.Buffer +} + +func (m stdinMock) Close() error { + return nil +} + +func TestListImages(t *testing.T) { + body := `[ + { + "Repository":"base", + "Tag":"ubuntu-12.10", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "Repository":"base", + "Tag":"ubuntu-quantal", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "RepoTag": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTag": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + 
"ParentId": "27cf784147099545", + "Id": "b750fe79269d2e", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } +]` + var expected []APIImages + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + images, err := client.ListImages(ListImagesOptions{}) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(images, expected) { + t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) + } +} + +func TestListImagesParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} + client := newTestClient(fakeRT) + _, err := client.ListImages(ListImagesOptions{All: false}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method) + } + if all := req.URL.Query().Get("all"); all != "0" && all != "" { + t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all) + } + fakeRT.Reset() + _, err = client.ListImages(ListImagesOptions{All: true}) + if err != nil { + t.Fatal(err) + } + req = fakeRT.requests[0] + if all := req.URL.Query().Get("all"); all != "1" { + t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. Got all=%s", all) + } + fakeRT.Reset() + _, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{ + "dangling": {"true"}, + }}) + if err != nil { + t.Fatal(err) + } + req = fakeRT.requests[0] + body := req.URL.Query().Get("filters") + var filters map[string][]string + err = json.Unmarshal([]byte(body), &filters) + if err != nil { + t.Fatal(err) + } + if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" { + t.Errorf("ListImages(dangling=[true]): Wrong filter map. 
Want dangling=[true], got dangling=%v", filters["dangling"]) + } +} + +func TestImageHistory(t *testing.T) { + body := `[ + { + "Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e", + "Tags": [ + "debian:7.6", + "debian:latest", + "debian:7", + "debian:wheezy" + ], + "Created": 1409856216, + "CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]" + }, + { + "Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce", + "Created": 1409856213, + "CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /", + "Size": 85178663 + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Tags": [ + "scratch:latest" + ], + "Created": 1371157430 + } +]` + var expected []ImageHistory + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + history, err := client.ImageHistory("debian:latest") + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(history, expected) { + t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history) + } +} + +func TestRemoveImage(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveImage(name) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveImage(%q): Wrong request path. Want %q. 
Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + err := client.RemoveImage("test:") + if err != ErrNoSuchImage { + t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestRemoveImageExtended(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveImageExtended(name, RemoveImageOptions{Force: true, NoPrune: true}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } + expectedQuery := "force=1&noprune=1" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PushImage: Wrong query string. Want %q. 
Got %q.", expectedQuery, query) + } +} + +func TestInspectImage(t *testing.T) { + body := `{ + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Created":"2013-03-23T22:24:18.818426Z", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig":{"Memory":1}, + "VirtualSize":12345 +}` + + created, err := time.Parse(time.RFC3339Nano, "2013-03-23T22:24:18.818426Z") + if err != nil { + t.Fatal(err) + } + + expected := Image{ + ID: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + Parent: "27cf784147099545", + Created: created, + Container: "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + ContainerConfig: Config{ + Memory: 1, + }, + VirtualSize: 12345, + } + fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(fakeRT) + image, err := client.InspectImage(expected.ID) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*image, expected) { + t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) + if req.URL.Path != u.Path { + t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) + } +} + +func TestInspectImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + name := "test" + image, err := client.InspectImage(name) + if image != nil { + t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) + } + if err != ErrNoSuchImage { + t.Errorf("InspectImage(%q): wrong error. Want %#v. 
Got %#v.", name, ErrNoSuchImage, err) + } +} + +func TestPushImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pushing 1/100" + if buf.String() != expected { + t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/test/push")) + if req.URL.Path != u.Path { + t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + if query := req.URL.Query().Encode(); query != "" { + t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) + } + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) + } + if strings.TrimSpace(string(auth)) != "{}" { + t.Errorf("PushImage: wrong body. Want %q. Got %q.", + base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) + } +} + +func TestPushImageWithRawJSON(t *testing.T) { + body := ` + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} + {"status":"Image successfully pushed"} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + + err := client.PushImage(PushImageOptions{ + Name: "test", + OutputStream: &buf, + RawJSONStream: true, + }, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("PushImage: Wrong raw output. Want %q. 
Got %q.", body, buf.String()) + } +} + +func TestPushImageWithAuthentication(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + inputAuth := AuthConfiguration{ + Username: "gopher", + Password: "gopher123", + Email: "gopher@tsuru.io", + } + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + var gotAuth AuthConfiguration + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) + } + + err = json.Unmarshal(auth, &gotAuth) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotAuth, inputAuth) { + t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) + } +} + +func TestPushImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var authConfig AuthConfiguration + var buf bytes.Buffer + opts := PushImageOptions{ + Name: "test", Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PushImage(opts, authConfig) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedQuery := "registry=docker.tsuru.io" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) + } +} + +func TestPushImageNoName(t *testing.T) { + client := Client{} + err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PushImage: got wrong error. Want %#v. 
Got %#v.", ErrNoSuchImage, err) + } +} + +func TestPullImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, + AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pulling 1/100" + if buf.String() != expected { + t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/create")) + if req.URL.Path != u.Path { + t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQuery := "fromImage=base" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PullImage: Wrong query strin. Want %q. Got %q.", expectedQuery, query) + } +} + +func TestPullImageWithRawJSON(t *testing.T) { + body := ` + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PullImage(PullImageOptions{ + Repository: "base", + OutputStream: &buf, + RawJSONStream: true, + }, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("PullImage: Wrong raw output. Want %q. 
Got %q", body, buf.String()) + } +} + +func TestPullImageWithoutOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestPullImageTag(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + Tag: "latest", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageNoRepository(t *testing.T) { + var opts PullImageOptions + client := Client{} + err := client.PullImage(opts, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PullImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestImportImageFromUrl(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "http://mycompany.com/file.tar", + Repository: "testimage", + Tag: "tag", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestImportImageFromInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + in := bytes.NewBufferString("tar content") + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "-", Repository: "testimage", + InputStream: in, OutputStream: &buf, + Tag: "tag", + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + e := "tar content" + if string(body) != e { + t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) + } +} + +func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + in := bytes.NewBufferString("foo") + opts := ImportImageOptions{ + Source: "http://test.com/container.tar", Repository: "testimage", + InputStream: in, OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + if string(body) != "" { + t.Errorf("ImportImage: wrong body. Want nothing. 
Got %#v.", string(body)) + } +} + +func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + tar, err := os.Open(tarPath) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + tarContent, err := ioutil.ReadAll(tar) + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tarContent, body) { + t.Errorf("ImportImage: wrong body. Want %#v content. Got %#v.", tarPath, body) + } +} + +func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestBuildImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + Pull: true, + RmTmpContainer: true, + ForceRmTmpContainer: true, + Memory: 1024, + Memswap: 2048, + CPUShares: 10, + CPUSetCPUs: "0-3", + InputStream: &buf, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil && strings.Index(err.Error(), "build image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{ + "t": {opts.Name}, + "nocache": {"1"}, + "q": {"1"}, + "pull": {"1"}, + "rm": {"1"}, + "forcerm": {"1"}, + "memory": {"1024"}, + "memswap": {"2048"}, + "cpushares": {"10"}, + "cpusetcpus": {"0-3"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageParametersForRemoteBuild(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestBuildImageMissingRepoAndNilInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != ErrMissingRepo { + t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) + } +} + +func TestBuildImageMissingOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := BuildImageOptions{Name: "testImage"} + err := client.BuildImage(opts) + if err != ErrMissingOutputStream { + t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingOutputStream, err) + } +} + +func TestBuildImageWithRawJSON(t *testing.T) { + body := ` + {"stream":"Step 0 : FROM ubuntu:latest\n"} + {"stream":" ---\u003e 4300eb9d3c8d\n"} + {"stream":"Step 1 : MAINTAINER docker \n"} + {"stream":" ---\u003e Using cache\n"} + {"stream":" ---\u003e 3a3ed758c370\n"} + {"stream":"Step 2 : CMD /usr/bin/top\n"} + {"stream":" ---\u003e Running in 36b1479cc2e4\n"} + {"stream":" ---\u003e 4b6188aebe39\n"} + {"stream":"Removing intermediate container 36b1479cc2e4\n"} + {"stream":"Successfully built 4b6188aebe39\n"} + ` + fakeRT := &FakeRoundTripper{ + message: body, + status: http.StatusOK, + header: map[string]string{ + "Content-Type": "application/json", + }, + } + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + RmTmpContainer: true, + InputStream: &buf, + OutputStream: &buf, + RawJSONStream: true, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + if buf.String() != body { + t.Errorf("BuildImage: Wrong raw output. Want %q. 
Got %q.", body, buf.String()) + } +} + +func TestBuildImageRemoteWithoutName(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("base", opts) + if err != nil && strings.Index(err.Error(), "tag image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := "http://localhost:4243/images/base/tag?repo=testImage" + got := req.URL.String() + if !reflect.DeepEqual(got, expected) { + t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageMissingRepo(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("", opts) + if err != ErrNoSuchImage { + t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", + ErrNoSuchImage, err) + } +} + +func TestIsUrl(t *testing.T) { + url := "http://foo.bar/" + result := isURL(url) + if !result { + t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) + } + url = "/foo/bar.tar" + result = isURL(url) + if result { + t.Errorf("isURL: wrong match. Expected %#v to not be a url. 
Got %#v", url, result) + } +} + +func TestLoadImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + tar, err := os.Open("testing/data/container.tar") + if err != nil { + t.Fatal(err) + } else { + defer tar.Close() + } + opts := LoadImageOptions{InputStream: tar} + err = client.LoadImage(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("LoadImage: wrong method. Expected %q. Got %q.", "POST", req.Method) + } + if req.URL.Path != "/images/load" { + t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path) + } +} + +func TestExportImage(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImageOptions{Name: "testimage", OutputStream: &buf} + err := client.ExportImage(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) + } + expectedPath := "/images/testimage/get" + if req.URL.Path != expectedPath { + t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path) + } +} + +func TestExportImages(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImagesOptions{Names: []string{"testimage1", "testimage2:latest"}, OutputStream: &buf} + err := client.ExportImages(opts) + if nil != err { + t.Error(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) + } + expected := "http://localhost:4243/images/get?names=testimage1&names=testimage2%3Alatest" + got := req.URL.String() + if !reflect.DeepEqual(got, expected) { + t.Errorf("ExportIMage: wrong path. Expected %q. 
Got %q.", expected, got) + } +} + +func TestExportImagesNoNames(t *testing.T) { + var buf bytes.Buffer + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := ExportImagesOptions{Names: []string{}, OutputStream: &buf} + err := client.ExportImages(opts) + if err == nil { + t.Error("Expected an error") + } + if err != ErrMustSpecifyNames { + t.Error(err) + } +} + +func TestSearchImages(t *testing.T) { + body := `[ + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + }, + { + "description":"A container with Cassandra 2.0.3", + "is_official":true, + "is_automated":false, + "name":"poklet/cassandra", + "star_count":17 + } + , + { + "description":"A container with Cassandra 2.0.3", + "is_official":false, + "is_automated":true, + "name":"poklet/cassandra", + "star_count":17 + } +]` + var expected []APIImageSearch + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + result, err := client.SearchImages("cassandra") + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go new file mode 100644 index 000000000..42d1c7e48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go @@ -0,0 +1,59 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "strings" +) + +// Version returns version information about the docker server. +// +// See http://goo.gl/BOZrF5 for more details. 
+func (c *Client) Version() (*Env, error) { + body, _, err := c.do("GET", "/version", doOptions{}) + if err != nil { + return nil, err + } + var env Env + if err := env.Decode(bytes.NewReader(body)); err != nil { + return nil, err + } + return &env, nil +} + +// Info returns system-wide information about the Docker server. +// +// See http://goo.gl/wmqZsW for more details. +func (c *Client) Info() (*Env, error) { + body, _, err := c.do("GET", "/info", doOptions{}) + if err != nil { + return nil, err + } + var info Env + err = info.Decode(bytes.NewReader(body)) + if err != nil { + return nil, err + } + return &info, nil +} + +// ParseRepositoryTag gets the name of the repository and returns it splitted +// in two parts: the repository and the tag. +// +// Some examples: +// +// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest +// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" +func ParseRepositoryTag(repoTag string) (repository string, tag string) { + n := strings.LastIndex(repoTag, ":") + if n < 0 { + return repoTag, "" + } + if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { + return repoTag[:n], tag + } + return repoTag, "" +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go new file mode 100644 index 000000000..ceaf076ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go @@ -0,0 +1,159 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "net/http" + "net/url" + "reflect" + "sort" + "testing" +) + +type DockerVersion struct { + Version string + GitCommit string + GoVersion string +} + +func TestVersion(t *testing.T) { + body := `{ + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := DockerVersion{ + Version: "0.2.2", + GitCommit: "5a2a5cc+CHANGES", + GoVersion: "go1.0.3", + } + version, err := client.Version() + if err != nil { + t.Fatal(err) + } + + if result := version.Get("Version"); result != expected.Version { + t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) + } + if result := version.Get("GitCommit"); result != expected.GitCommit { + t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) + } + if result := version.Get("GoVersion"); result != expected.GoVersion { + t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/version")) + if req.URL.Path != u.Path { + t.Errorf("Version(): wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } +} + +func TestVersionError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Version() + if version != nil { + t.Errorf("Version(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Version(): unexpected error") + } +} + +func TestInfo(t *testing.T) { + body := `{ + "Containers":11, + "Images":16, + "Debug":0, + "NFd":11, + "NGoroutines":21, + "MemoryLimit":1, + "SwapLimit":0 +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := Env{} + expected.SetInt("Containers", 11) + expected.SetInt("Images", 16) + expected.SetBool("Debug", false) + expected.SetInt("NFd", 11) + expected.SetInt("NGoroutines", 21) + expected.SetBool("MemoryLimit", true) + expected.SetBool("SwapLimit", false) + info, err := client.Info() + if err != nil { + t.Fatal(err) + } + infoSlice := []string(*info) + expectedSlice := []string(expected) + sort.Strings(infoSlice) + sort.Strings(expectedSlice) + if !reflect.DeepEqual(expectedSlice, infoSlice) { + t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/info")) + if req.URL.Path != u.Path { + t.Errorf("Info(): Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } +} + +func TestInfoError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Info() + if version != nil { + t.Errorf("Info(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Info(): unexpected error") + } +} + +func TestParseRepositoryTag(t *testing.T) { + var tests = []struct { + input string + expectedRepo string + expectedTag string + }{ + { + "localhost.localdomain:5000/samalba/hipache:latest", + "localhost.localdomain:5000/samalba/hipache", + "latest", + }, + { + "localhost.localdomain:5000/samalba/hipache", + "localhost.localdomain:5000/samalba/hipache", + "", + }, + { + "tsuru/python", + "tsuru/python", + "", + }, + { + "tsuru/python:2.7", + "tsuru/python", + "2.7", + }, + } + for _, tt := range tests { + repo, tag := ParseRepositoryTag(tt.input) + if repo != tt.expectedRepo { + t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo) + } + if tag != tt.expectedTag { + t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. Got %q", tt.input, tt.expectedTag, tag) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go new file mode 100644 index 000000000..0d3e2d43f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go @@ -0,0 +1,127 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" +) + +// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the +// network already exists. +var ErrNetworkAlreadyExists = errors.New("network already exists") + +// Network represents a network. 
+// +// See https://goo.gl/FDkCdQ for more details. +type Network struct { + Name string `json:"name"` + ID string `json:"id"` + Type string `json:"type"` + Endpoints []*Endpoint `json:"endpoints"` +} + +// Endpoint represents an endpoint. +// +// See https://goo.gl/FDkCdQ for more details. +type Endpoint struct { + Name string `json:"name"` + ID string `json:"id"` + Network string `json:"network"` +} + +// ListNetworks returns all networks. +// +// See https://goo.gl/4hCNtZ for more details. +func (c *Client) ListNetworks() ([]Network, error) { + body, _, err := c.do("GET", "/networks", doOptions{}) + if err != nil { + return nil, err + } + var networks []Network + if err := json.Unmarshal(body, &networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkInfo returns information about a network by its ID. +// +// See https://goo.gl/4hCNtZ for more details. +func (c *Client) NetworkInfo(id string) (*Network, error) { + path := "/networks/" + id + body, status, err := c.do("GET", path, doOptions{}) + if status == http.StatusNotFound { + return nil, &NoSuchNetwork{ID: id} + } + if err != nil { + return nil, err + } + var network Network + if err := json.Unmarshal(body, &network); err != nil { + return nil, err + } + return &network, nil +} + +// CreateNetworkOptions specify parameters to the CreateNetwork function and +// (for now) is the expected body of the "create network" http request message +// +// See https://goo.gl/FDkCdQ for more details. +type CreateNetworkOptions struct { + Name string `json:"name"` + NetworkType string `json:"network_type"` + Options map[string]interface{} `json:"options"` +} + +// CreateNetwork creates a new network, returning the network instance, +// or an error in case of failure. +// +// See http://goo.gl/mErxNp for more details. 
+func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { + body, status, err := c.do( + "POST", + "/networks", + doOptions{ + data: opts, + }, + ) + + if status == http.StatusConflict { + return nil, ErrNetworkAlreadyExists + } + if err != nil { + return nil, err + } + + type createNetworkResponse struct { + ID string + } + var ( + network Network + resp createNetworkResponse + ) + err = json.Unmarshal(body, &resp) + if err != nil { + return nil, err + } + + network.Name = opts.Name + network.ID = resp.ID + network.Type = opts.NetworkType + + return &network, nil +} + +// NoSuchNetwork is the error returned when a given network does not exist. +type NoSuchNetwork struct { + ID string +} + +func (err *NoSuchNetwork) Error() string { + return fmt.Sprintf("No such network: %s", err.ID) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go new file mode 100644 index 000000000..970988cf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go @@ -0,0 +1,96 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "encoding/json" + "net/http" + "net/url" + "reflect" + "testing" +) + +func TestListNetworks(t *testing.T) { + jsonNetworks := `[ + { + "ID": "8dfafdbc3a40", + "Name": "blah", + "Type": "bridge", + "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] + }, + { + "ID": "9fb1e39c", + "Name": "foo", + "Type": "bridge", + "Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}] + } +]` + var expected []Network + err := json.Unmarshal([]byte(jsonNetworks), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: jsonNetworks, status: http.StatusOK}) + containers, err := client.ListNetworks() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(containers, expected) { + t.Errorf("ListNetworks: Expected %#v. Got %#v.", expected, containers) + } +} + +func TestNetworkInfo(t *testing.T) { + jsonNetwork := `{ + "ID": "8dfafdbc3a40", + "Name": "blah", + "Type": "bridge", + "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] + }` + var expected Network + err := json.Unmarshal([]byte(jsonNetwork), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonNetwork, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "8dfafdbc3a40" + network, err := client.NetworkInfo(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*network, expected) { + t.Errorf("NetworkInfo(%q): Expected %#v. Got %#v.", id, expected, network) + } + expectedURL, _ := url.Parse(client.getURL("/networks/8dfafdbc3a40")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("NetworkInfo(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestNetworkCreate(t *testing.T) { + jsonID := `{"ID": "8dfafdbc3a40"}` + jsonNetwork := `{ + "ID": "8dfafdbc3a40", + "Name": "foobar", + "Type": "bridge" + }` + var expected Network + err := json.Unmarshal([]byte(jsonNetwork), &expected) + if err != nil { + t.Fatal(err) + } + + client := newTestClient(&FakeRoundTripper{message: jsonID, status: http.StatusOK}) + opts := CreateNetworkOptions{"foobar", "bridge", nil} + network, err := client.CreateNetwork(opts) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(*network, expected) { + t.Errorf("CreateNetwork: Expected %#v. Got %#v.", expected, network) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go new file mode 100644 index 000000000..16aa00388 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go @@ -0,0 +1,49 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +// Signal represents a signal that can be send to the container on +// KillContainer call. +type Signal int + +// These values represent all signals available on Linux, where containers will +// be running. 
+const ( + SIGABRT = Signal(0x6) + SIGALRM = Signal(0xe) + SIGBUS = Signal(0x7) + SIGCHLD = Signal(0x11) + SIGCLD = Signal(0x11) + SIGCONT = Signal(0x12) + SIGFPE = Signal(0x8) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x1d) + SIGIOT = Signal(0x6) + SIGKILL = Signal(0x9) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x1d) + SIGPROF = Signal(0x1b) + SIGPWR = Signal(0x1e) + SIGQUIT = Signal(0x3) + SIGSEGV = Signal(0xb) + SIGSTKFLT = Signal(0x10) + SIGSTOP = Signal(0x13) + SIGSYS = Signal(0x1f) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x14) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGUNUSED = Signal(0x1f) + SIGURG = Signal(0x17) + SIGUSR1 = Signal(0xa) + SIGUSR2 = Signal(0xc) + SIGVTALRM = Signal(0x1a) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go new file mode 100644 index 000000000..48042cbda --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go @@ -0,0 +1,117 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" +) + +func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { + excludes, err := parseDockerignore(srcPath) + if err != nil { + return nil, err + } + + includes := []string{"."} + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The deamon will remove them for us, if needed, after it + // parses the Dockerfile. + // + // https://github.com/docker/docker/issues/8330 + // + forceIncludeFiles := []string{".dockerignore", dockerfilePath} + + for _, includeFile := range forceIncludeFiles { + if includeFile == "" { + continue + } + keepThem, err := fileutils.Matches(includeFile, excludes) + if err != nil { + return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) + } + if keepThem { + includes = append(includes, includeFile) + } + } + + if err := validateContextDirectory(srcPath, excludes); err != nil { + return nil, err + } + tarOpts := &archive.TarOptions{ + ExcludePatterns: excludes, + IncludeFiles: includes, + Compression: archive.Uncompressed, + NoLchown: true, + } + return archive.TarWithOptions(srcPath, tarOpts) +} + +// validateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read. 
+// Symlinks which point to non-existing files don't trigger an error +func validateContextDirectory(srcPath string, excludes []string) error { + return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +func parseDockerignore(root string) ([]string, error) { + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) + } + excludes = strings.Split(string(ignore), "\n") + + return excludes, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore new file mode 100644 index 000000000..027e8c20e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore @@ -0,0 +1,3 @@ +container.tar +dockerfile.tar +foofile diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile new file mode 100644 index 000000000..0948dcfa8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile @@ -0,0 +1,15 @@ +# this file describes how to build tsuru python image +# to run it: +# 1- install docker +# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile + +from base:ubuntu-quantal +run apt-get install wget -y --force-yes +run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate +run mkdir /var/lib/tsuru +run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 +run cp /var/lib/tsuru/python/deploy /var/lib/tsuru +run cp /var/lib/tsuru/base/restart /var/lib/tsuru +run cp /var/lib/tsuru/base/start /var/lib/tsuru +run /var/lib/tsuru/base/install +run /var/lib/tsuru/base/setup diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile new file mode 100644 index 000000000..e69de29bb diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem new file mode 100644 index 000000000..8e38bba13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU +MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy +MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii +rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z +f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2 
+znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN ++OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb +vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/ +BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE +MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII +CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo +W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F +rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2 +ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF +i3/zzlkvOnjV +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem new file mode 100644 index 000000000..5e7244b24 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx +FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw +MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm +b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4 +V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7 +BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H +AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt +JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB +/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG +SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF +zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM +U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb +CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL +cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL 
+8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ== +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar new file mode 100644 index 0000000000000000000000000000000000000000..e4b066e3b6df8cb78ac445a34234f3780d164cf4 GIT binary patch literal 2048 zcmeH_Q3``F42FH)DgF~kTC`qZ7s*`9%A^%r$Bu89Fp<6NMew1akmheFe?H>)Y5N#5 z`(UT)m>?q4G^iwZ#(XmAwH8Ujv`|_rQd)Ig3sQ!(szArs+5bAH%#&Di1HU}iJx_zp z+3uU9k~Zgl)J<3?S%)LS_Hgc7e)t4AX&%Rz>>WAcX2Ec>82D}md=O1Y)p%bo=N_rJ OD+CIGLZA@%gTMmt=q{T8 literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar new file mode 100644 index 0000000000000000000000000000000000000000..32c9ce64704835cd096b85ac44c35b5087b5ccdd GIT binary patch literal 2560 zcmeHGy>8<$49;3V1%d0TNOs}`$a>xT46-c8LTt+?QB8ACf0XPiQll-h0~9$I?_v`_`p)qp;@ z0OJK)JAmosQD=m*-~y?5ASGvD1{zS;L7n!AYz2z}2Y8%Kb25fgK0fDb5l4UE+{yF$ zXs`{{TG^hbn!J);Cl1>2UV0=k!T8hL+GbhfZ2u5L51|SJ2KFb&fyiW3|3Qw(jvC+i zouk4oz*u9Q((Iyric9uLhPZsmgZ8ANMrS_2p5cn+n!M}dU&=mMrdq8|OlgOvF-oFN zh5A!%9Pk(EcxS4q(c~Z~u-BL7!+gIN2&&-GnGy1YRpY|{e@?X?J9}9;KY_$PxYO}H o;5QJT#=q||{Y*ZuNn-Gk-)jtGb|Y`+PV+v2`vmS2xaA4_1I+dVl>h($ literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile new file mode 100644 index 000000000..e69de29bb diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem new file mode 100644 index 000000000..a9346bcf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m +6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA +XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa +TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn +JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH +VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l +DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq +tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY +kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU +jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/ +nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv +CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE +IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i +rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd +++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4 +NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0 +Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM +0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk +SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw +8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi +nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ +KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg +A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH +qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3 +Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8 +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem new file mode 100644 index 000000000..89cc445e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem @@ -0,0 
+1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU +MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy +MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g +Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0 +AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7 +CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva +rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV +t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/ +BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A +AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj +LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH +Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W +UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF +2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa +o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem new file mode 100644 index 000000000..c897e5da5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz +PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA +oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI +n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs +sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3 +0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7 +3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x 
+x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu +1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH +SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL +W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh +Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm +HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6 +7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq ++MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq +aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy +oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn +QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8 +BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7 +i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe +JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ +M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D +IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos +N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy +A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink new file mode 120000 index 000000000..3ddf86a35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink @@ -0,0 +1 @@ +doesnotexist \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go new file mode 100644 index 000000000..41d872771 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go @@ -0,0 +1,1062 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testing provides a fake implementation of the Docker API, useful for +// testing purpose. +package testing + +import ( + "archive/tar" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + mathrand "math/rand" + "net" + "net/http" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/fsouza/go-dockerclient" + "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" + "github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux" +) + +var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) + +// DockerServer represents a programmable, concurrent (not much), HTTP server +// implementing a fake version of the Docker remote API. +// +// It can used in standalone mode, listening for connections or as an arbitrary +// HTTP handler. +// +// For more details on the remote API, check http://goo.gl/G3plxW. +type DockerServer struct { + containers []*docker.Container + execs []*docker.ExecInspect + execMut sync.RWMutex + cMut sync.RWMutex + images []docker.Image + iMut sync.RWMutex + imgIDs map[string]string + networks []*docker.Network + netMut sync.RWMutex + listener net.Listener + mux *mux.Router + hook func(*http.Request) + failures map[string]string + multiFailures []map[string]string + execCallbacks map[string]func() + statsCallbacks map[string]func(string) docker.Stats + customHandlers map[string]http.Handler + handlerMutex sync.RWMutex + cChan chan<- *docker.Container +} + +// NewServer returns a new instance of the fake server, in standalone mode. Use +// the method URL to get the URL of the server. +// +// It receives the bind address (use 127.0.0.1:0 for getting an available port +// on the host), a channel of containers and a hook function, that will be +// called on every request. 
+// +// The fake server will send containers in the channel whenever the container +// changes its state, via the HTTP API (i.e.: create, start and stop). This +// channel may be nil, which means that the server won't notify on state +// changes. +func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) { + listener, err := net.Listen("tcp", bind) + if err != nil { + return nil, err + } + server := DockerServer{ + listener: listener, + imgIDs: make(map[string]string), + hook: hook, + failures: make(map[string]string), + execCallbacks: make(map[string]func()), + statsCallbacks: make(map[string]func(string) docker.Stats), + customHandlers: make(map[string]http.Handler), + cChan: containerChan, + } + server.buildMuxer() + go http.Serve(listener, &server) + return &server, nil +} + +func (s *DockerServer) notify(container *docker.Container) { + if s.cChan != nil { + s.cChan <- container + } +} + +func (s *DockerServer) buildMuxer() { + s.mux = mux.NewRouter() + s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer)) + s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers)) + s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer)) + s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer)) + s.mux.Path("/containers/{id:.*}/rename").Methods("POST").HandlerFunc(s.handlerWrapper(s.renameContainer)) + s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer)) + s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer)) + s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) + s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) + 
s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer)) + s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer)) + s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer)) + s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer)) + s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) + s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) + s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) + s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) + s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) + s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) + s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage)) + s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage)) + s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages)) + s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage)) + s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage)) + s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage)) + s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage)) + s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents) + s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker)) + s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage)) + 
s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage)) + s.mux.Path("/networks").Methods("GET").HandlerFunc(s.handlerWrapper(s.listNetworks)) + s.mux.Path("/networks/{id:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.networkInfo)) + s.mux.Path("/networks").Methods("POST").HandlerFunc(s.handlerWrapper(s.createNetwork)) +} + +// SetHook changes the hook function used by the server. +// +// The hook function is a function called on every request. +func (s *DockerServer) SetHook(hook func(*http.Request)) { + s.hook = hook +} + +// PrepareExec adds a callback to a container exec in the fake server. +// +// This function will be called whenever the given exec id is started, and the +// given exec id will remain in the "Running" start while the function is +// running, so it's useful for emulating an exec that runs for two seconds, for +// example: +// +// opts := docker.CreateExecOptions{ +// AttachStdin: true, +// AttachStdout: true, +// AttachStderr: true, +// Tty: true, +// Cmd: []string{"/bin/bash", "-l"}, +// } +// // Client points to a fake server. +// exec, err := client.CreateExec(opts) +// // handle error +// server.PrepareExec(exec.ID, func() {time.Sleep(2 * time.Second)}) +// err = client.StartExec(exec.ID, docker.StartExecOptions{Tty: true}) // will block for 2 seconds +// // handle error +func (s *DockerServer) PrepareExec(id string, callback func()) { + s.execCallbacks[id] = callback +} + +// PrepareStats adds a callback that will be called for each container stats +// call. +// +// This callback function will be called multiple times if stream is set to +// true when stats is called. +func (s *DockerServer) PrepareStats(id string, callback func(string) docker.Stats) { + s.statsCallbacks[id] = callback +} + +// PrepareFailure adds a new expected failure based on a URL regexp it receives +// an id for the failure. 
+func (s *DockerServer) PrepareFailure(id string, urlRegexp string) { + s.failures[id] = urlRegexp +} + +// PrepareMultiFailures enqueues a new expected failure based on a URL regexp +// it receives an id for the failure. +func (s *DockerServer) PrepareMultiFailures(id string, urlRegexp string) { + s.multiFailures = append(s.multiFailures, map[string]string{"error": id, "url": urlRegexp}) +} + +// ResetFailure removes an expected failure identified by the given id. +func (s *DockerServer) ResetFailure(id string) { + delete(s.failures, id) +} + +// ResetMultiFailures removes all enqueued failures. +func (s *DockerServer) ResetMultiFailures() { + s.multiFailures = []map[string]string{} +} + +// CustomHandler registers a custom handler for a specific path. +// +// For example: +// +// server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// http.Error(w, "Something wrong is not right", http.StatusInternalServerError) +// })) +func (s *DockerServer) CustomHandler(path string, handler http.Handler) { + s.handlerMutex.Lock() + s.customHandlers[path] = handler + s.handlerMutex.Unlock() +} + +// MutateContainer changes the state of a container, returning an error if the +// given id does not match to any container "running" in the server. +func (s *DockerServer) MutateContainer(id string, state docker.State) error { + for _, container := range s.containers { + if container.ID == id { + container.State = state + return nil + } + } + return errors.New("container not found") +} + +// Stop stops the server. +func (s *DockerServer) Stop() { + if s.listener != nil { + s.listener.Close() + } +} + +// URL returns the HTTP URL of the server. +func (s *DockerServer) URL() string { + if s.listener == nil { + return "" + } + return "http://" + s.listener.Addr().String() + "/" +} + +// ServeHTTP handles HTTP requests sent to the server. 
+func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.handlerMutex.RLock() + defer s.handlerMutex.RUnlock() + for re, handler := range s.customHandlers { + if m, _ := regexp.MatchString(re, r.URL.Path); m { + handler.ServeHTTP(w, r) + return + } + } + s.mux.ServeHTTP(w, r) + if s.hook != nil { + s.hook(r) + } +} + +// DefaultHandler returns default http.Handler mux, it allows customHandlers to +// call the default behavior if wanted. +func (s *DockerServer) DefaultHandler() http.Handler { + return s.mux +} + +func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + for errorID, urlRegexp := range s.failures { + matched, err := regexp.MatchString(urlRegexp, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !matched { + continue + } + http.Error(w, errorID, http.StatusBadRequest) + return + } + for i, failure := range s.multiFailures { + matched, err := regexp.MatchString(failure["url"], r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !matched { + continue + } + http.Error(w, failure["error"], http.StatusBadRequest) + s.multiFailures = append(s.multiFailures[:i], s.multiFailures[i+1:]...) 
+ return + } + f(w, r) + } +} + +func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) { + all := r.URL.Query().Get("all") + s.cMut.RLock() + result := make([]docker.APIContainers, 0, len(s.containers)) + for _, container := range s.containers { + if all == "1" || container.State.Running { + result = append(result, docker.APIContainers{ + ID: container.ID, + Image: container.Image, + Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")), + Created: container.Created.Unix(), + Status: container.State.String(), + Ports: container.NetworkSettings.PortMappingAPI(), + Names: []string{fmt.Sprintf("/%s", container.Name)}, + }) + } + } + s.cMut.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) { + s.cMut.RLock() + result := make([]docker.APIImages, len(s.images)) + for i, image := range s.images { + result[i] = docker.APIImages{ + ID: image.ID, + Created: image.Created.Unix(), + } + for tag, id := range s.imgIDs { + if id == image.ID { + result[i].RepoTags = append(result[i].RepoTags, tag) + } + } + } + s.cMut.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) findImage(id string) (string, error) { + s.iMut.RLock() + defer s.iMut.RUnlock() + image, ok := s.imgIDs[id] + if ok { + return image, nil + } + image, _, err := s.findImageByID(id) + return image, err +} + +func (s *DockerServer) findImageByID(id string) (string, int, error) { + s.iMut.RLock() + defer s.iMut.RUnlock() + for i, image := range s.images { + if image.ID == id { + return image.ID, i, nil + } + } + return "", -1, errors.New("No such image") +} + +func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) { + var config struct { + *docker.Config + HostConfig 
*docker.HostConfig + } + defer r.Body.Close() + err := json.NewDecoder(r.Body).Decode(&config) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + name := r.URL.Query().Get("name") + if name != "" && !nameRegexp.MatchString(name) { + http.Error(w, "Invalid container name", http.StatusInternalServerError) + return + } + if _, err := s.findImage(config.Image); err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + ports := map[docker.Port][]docker.PortBinding{} + for port := range config.ExposedPorts { + ports[port] = []docker.PortBinding{{ + HostIP: "0.0.0.0", + HostPort: strconv.Itoa(mathrand.Int() % 65536), + }} + } + + //the container may not have cmd when using a Dockerfile + var path string + var args []string + if len(config.Cmd) == 1 { + path = config.Cmd[0] + } else if len(config.Cmd) > 1 { + path = config.Cmd[0] + args = config.Cmd[1:] + } + + generatedID := s.generateID() + config.Config.Hostname = generatedID[:12] + container := docker.Container{ + Name: name, + ID: generatedID, + Created: time.Now(), + Path: path, + Args: args, + Config: config.Config, + HostConfig: config.HostConfig, + State: docker.State{ + Running: false, + Pid: mathrand.Int() % 50000, + ExitCode: 0, + StartedAt: time.Now(), + }, + Image: config.Image, + NetworkSettings: &docker.NetworkSettings{ + IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2), + IPPrefixLen: 24, + Gateway: "172.16.42.1", + Bridge: "docker0", + Ports: ports, + }, + } + s.cMut.Lock() + if container.Name != "" { + for _, c := range s.containers { + if c.Name == container.Name { + defer s.cMut.Unlock() + http.Error(w, "there's already a container with this name", http.StatusConflict) + return + } + } + } + s.containers = append(s.containers, &container) + s.cMut.Unlock() + w.WriteHeader(http.StatusCreated) + s.notify(&container) + var c = struct{ ID string }{ID: container.ID} + json.NewEncoder(w).Encode(c) +} + +func (s *DockerServer) generateID() 
string { + var buf [16]byte + rand.Read(buf[:]) + return fmt.Sprintf("%x", buf) +} + +func (s *DockerServer) renameContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, index, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + copy := *container + copy.Name = r.URL.Query().Get("name") + s.cMut.Lock() + defer s.cMut.Unlock() + if s.containers[index].ID == copy.ID { + s.containers[index] = © + } + w.WriteHeader(http.StatusNoContent) +} + +func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(container) +} + +func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + _, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + stream, _ := strconv.ParseBool(r.URL.Query().Get("stream")) + callback := s.statsCallbacks[id] + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + encoder := json.NewEncoder(w) + for { + var stats docker.Stats + if callback != nil { + stats = callback(id) + } + encoder.Encode(stats) + if !stream { + break + } + } +} + +func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + if !container.State.Running { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Container %s is not running", id) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + result := docker.TopResult{ + Titles: []string{"UID", "PID", 
"PPID", "C", "STIME", "TTY", "TIME", "CMD"}, + Processes: [][]string{ + {"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")}, + }, + } + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + defer r.Body.Close() + var hostConfig docker.HostConfig + err = json.NewDecoder(r.Body).Decode(&hostConfig) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + container.HostConfig = &hostConfig + if container.State.Running { + http.Error(w, "Container already running", http.StatusBadRequest) + return + } + container.State.Running = true + s.notify(container) +} + +func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Running { + http.Error(w, "Container not running", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Running = false + s.notify(container) +} + +func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if container.State.Paused { + http.Error(w, "Container already paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = true +} + +func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := 
s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Paused { + http.Error(w, "Container not paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = false +} + +func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + hijacker, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "cannot hijack connection", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.raw-stream") + w.WriteHeader(http.StatusOK) + conn, _, err := hijacker.Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout) + if container.State.Running { + fmt.Fprintf(outStream, "Container %q is running\n", container.ID) + } else { + fmt.Fprintf(outStream, "Container %q is not running\n", container.ID) + } + fmt.Fprintln(outStream, "What happened?") + fmt.Fprintln(outStream, "Something happened") + conn.Close() +} + +func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + for { + time.Sleep(1e6) + s.cMut.RLock() + if !container.State.Running { + s.cMut.RUnlock() + break + } + s.cMut.RUnlock() + } + result := map[string]int{"StatusCode": container.State.ExitCode} + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + force := r.URL.Query().Get("force") + _, index, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + 
return + } + if s.containers[index].State.Running && force != "1" { + msg := "Error: API error (406): Impossible to remove a running container, please stop it first" + http.Error(w, msg, http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + s.cMut.Lock() + defer s.cMut.Unlock() + s.containers[index] = s.containers[len(s.containers)-1] + s.containers = s.containers[:len(s.containers)-1] +} + +func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) { + id := r.URL.Query().Get("container") + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + var config *docker.Config + runConfig := r.URL.Query().Get("run") + if runConfig != "" { + config = new(docker.Config) + err = json.Unmarshal([]byte(runConfig), config) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } + w.WriteHeader(http.StatusOK) + image := docker.Image{ + ID: "img-" + container.ID, + Parent: container.Image, + Container: container.ID, + Comment: r.URL.Query().Get("m"), + Author: r.URL.Query().Get("author"), + Config: config, + } + repository := r.URL.Query().Get("repo") + tag := r.URL.Query().Get("tag") + s.iMut.Lock() + s.images = append(s.images, image) + if repository != "" { + if tag != "" { + repository += ":" + tag + } + s.imgIDs[repository] = image.ID + } + s.iMut.Unlock() + fmt.Fprintf(w, `{"ID":%q}`, image.ID) +} + +func (s *DockerServer) findContainer(idOrName string) (*docker.Container, int, error) { + s.cMut.RLock() + defer s.cMut.RUnlock() + for i, container := range s.containers { + if container.ID == idOrName || container.Name == idOrName { + return container, i, nil + } + } + return nil, -1, errors.New("No such container") +} + +func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) { + if ct := r.Header.Get("Content-Type"); ct == "application/tar" { + gotDockerFile := false + tr := tar.NewReader(r.Body) + for { + 
header, err := tr.Next() + if err != nil { + break + } + if header.Name == "Dockerfile" { + gotDockerFile = true + } + } + if !gotDockerFile { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte("miss Dockerfile")) + return + } + } + //we did not use that Dockerfile to build image cause we are a fake Docker daemon + image := docker.Image{ + ID: s.generateID(), + Created: time.Now(), + } + + query := r.URL.Query() + repository := image.ID + if t := query.Get("t"); t != "" { + repository = t + } + s.iMut.Lock() + s.images = append(s.images, image) + s.imgIDs[repository] = image.ID + s.iMut.Unlock() + w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID))) +} + +func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) { + fromImageName := r.URL.Query().Get("fromImage") + tag := r.URL.Query().Get("tag") + image := docker.Image{ + ID: s.generateID(), + } + s.iMut.Lock() + s.images = append(s.images, image) + if fromImageName != "" { + if tag != "" { + fromImageName = fmt.Sprintf("%s:%s", fromImageName, tag) + } + s.imgIDs[fromImageName] = image.ID + } + s.iMut.Unlock() +} + +func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) { + name := mux.Vars(r)["name"] + tag := r.URL.Query().Get("tag") + if tag != "" { + name += ":" + tag + } + s.iMut.RLock() + if _, ok := s.imgIDs[name]; !ok { + s.iMut.RUnlock() + http.Error(w, "No such image", http.StatusNotFound) + return + } + s.iMut.RUnlock() + fmt.Fprintln(w, "Pushing...") + fmt.Fprintln(w, "Pushed") +} + +func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) { + name := mux.Vars(r)["name"] + s.iMut.RLock() + if _, ok := s.imgIDs[name]; !ok { + s.iMut.RUnlock() + http.Error(w, "No such image", http.StatusNotFound) + return + } + s.iMut.RUnlock() + s.iMut.Lock() + defer s.iMut.Unlock() + newRepo := r.URL.Query().Get("repo") + newTag := r.URL.Query().Get("tag") + if newTag != "" { + newRepo += ":" + newTag + } + s.imgIDs[newRepo] = s.imgIDs[name] + 
w.WriteHeader(http.StatusCreated) +} + +func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + s.iMut.RLock() + var tag string + if img, ok := s.imgIDs[id]; ok { + id, tag = img, id + } + var tags []string + for tag, taggedID := range s.imgIDs { + if taggedID == id { + tags = append(tags, tag) + } + } + s.iMut.RUnlock() + _, index, err := s.findImageByID(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.WriteHeader(http.StatusNoContent) + s.iMut.Lock() + defer s.iMut.Unlock() + if len(tags) < 2 { + s.images[index] = s.images[len(s.images)-1] + s.images = s.images[:len(s.images)-1] + } + if tag != "" { + delete(s.imgIDs, tag) + } +} + +func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) { + name := mux.Vars(r)["name"] + s.iMut.RLock() + defer s.iMut.RUnlock() + if id, ok := s.imgIDs[name]; ok { + for _, img := range s.images { + if img.ID == id { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(img) + return + } + } + } + http.Error(w, "not found", http.StatusNotFound) +} + +func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var events [][]byte + count := mathrand.Intn(20) + for i := 0; i < count; i++ { + data, err := json.Marshal(s.generateEvent()) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + events = append(events, data) + } + w.WriteHeader(http.StatusOK) + for _, d := range events { + fmt.Fprintln(w, d) + time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond) + } +} + +func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func (s *DockerServer) generateEvent() *docker.APIEvents { + var eventType string + switch mathrand.Intn(4) { + case 0: + eventType = "create" + case 1: + eventType = "start" + case 2: + 
eventType = "stop" + case 3: + eventType = "destroy" + } + return &docker.APIEvents{ + ID: s.generateID(), + Status: eventType, + From: "mybase:latest", + Time: time.Now().Unix(), + } +} + +func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/tar") +} + +func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + exec := docker.ExecInspect{ + ID: s.generateID(), + Container: *container, + } + var params docker.CreateExecOptions + err = json.NewDecoder(r.Body).Decode(¶ms) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if len(params.Cmd) > 0 { + exec.ProcessConfig.EntryPoint = params.Cmd[0] + if len(params.Cmd) > 1 { + exec.ProcessConfig.Arguments = params.Cmd[1:] + } + } + s.execMut.Lock() + s.execs = append(s.execs, &exec) + s.execMut.Unlock() + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID}) +} + +func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + if exec, err := s.getExec(id); err == nil { + s.execMut.Lock() + exec.Running = true + s.execMut.Unlock() + if callback, ok := s.execCallbacks[id]; ok { + callback() + delete(s.execCallbacks, id) + } else if callback, ok := s.execCallbacks["*"]; ok { + callback() + delete(s.execCallbacks, "*") + } + s.execMut.Lock() + exec.Running = false + s.execMut.Unlock() + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) +} + +func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) { + id := 
mux.Vars(r)["id"] + if _, err := s.getExec(id); err == nil { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) +} + +func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + if exec, err := s.getExec(id); err == nil { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(exec) + return + } + w.WriteHeader(http.StatusNotFound) +} + +func (s *DockerServer) getExec(id string) (*docker.ExecInspect, error) { + s.execMut.RLock() + defer s.execMut.RUnlock() + for _, exec := range s.execs { + if exec.ID == id { + return exec, nil + } + } + return nil, errors.New("exec not found") +} + +func (s *DockerServer) findNetwork(idOrName string) (*docker.Network, int, error) { + s.netMut.RLock() + defer s.netMut.RUnlock() + for i, network := range s.networks { + if network.ID == idOrName || network.Name == idOrName { + return network, i, nil + } + } + return nil, -1, errors.New("No such network") +} + +func (s *DockerServer) listNetworks(w http.ResponseWriter, r *http.Request) { + s.netMut.RLock() + result := make([]docker.Network, 0, len(s.networks)) + for _, network := range s.networks { + result = append(result, *network) + } + s.netMut.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) networkInfo(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + network, _, err := s.findNetwork(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(network) +} + +// isValidName validates configuration objects supported by libnetwork +func isValidName(name string) bool { + if name == "" || strings.Contains(name, ".") { + return false + } + return true +} + +func (s *DockerServer) 
createNetwork(w http.ResponseWriter, r *http.Request) { + var config *docker.CreateNetworkOptions + defer r.Body.Close() + err := json.NewDecoder(r.Body).Decode(&config) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !isValidName(config.Name) { + http.Error(w, "Invalid network name", http.StatusBadRequest) + return + } + if n, _, _ := s.findNetwork(config.Name); n != nil { + http.Error(w, "network already exists", http.StatusForbidden) + return + } + + generatedID := s.generateID() + network := docker.Network{ + Name: config.Name, + ID: generatedID, + Type: config.NetworkType, + } + s.netMut.Lock() + s.networks = append(s.networks, &network) + s.netMut.Unlock() + w.WriteHeader(http.StatusCreated) + var c = struct{ ID string }{ID: network.ID} + json.NewEncoder(w).Encode(c) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go new file mode 100644 index 000000000..36789abb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go @@ -0,0 +1,1784 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing + +import ( + "bytes" + "encoding/json" + "fmt" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/fsouza/go-dockerclient" +) + +func TestNewServer(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.listener.Close() + conn, err := net.Dial("tcp", server.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + conn.Close() +} + +func TestServerStop(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + server.Stop() + _, err = net.Dial("tcp", server.listener.Addr().String()) + if err == nil { + t.Error("Unexpected error when dialing to stopped server") + } +} + +func TestServerStopNoListener(t *testing.T) { + server := DockerServer{} + server.Stop() +} + +func TestServerURL(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.Stop() + url := server.URL() + if expected := "http://" + server.listener.Addr().String() + "/"; url != expected { + t.Errorf("DockerServer.URL(): Want %q. 
Got %q.", expected, url) + } +} + +func TestServerURLNoListener(t *testing.T) { + server := DockerServer{} + url := server.URL() + if url != "" { + t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url) + } +} + +func TestHandleWithHook(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, func(*http.Request) { called = true }) + defer server.Stop() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("ServeHTTP did not call the hook function.") + } +} + +func TestSetHook(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + defer server.Stop() + server.SetHook(func(*http.Request) { called = true }) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("ServeHTTP did not call the hook function.") + } +} + +func TestCustomHandler(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 2) + server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + fmt.Fprint(w, "Hello world") + })) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("Did not call the custom handler") + } + if got := recorder.Body.String(); got != "Hello world" { + t.Errorf("Wrong output for custom handler: want %q. 
Got %q.", "Hello world", got) + } +} + +func TestCustomHandlerRegexp(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 2) + server.CustomHandler("/containers/.*/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + fmt.Fprint(w, "Hello world") + })) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/.*/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("Did not call the custom handler") + } + if got := recorder.Body.String(); got != "Hello world" { + t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got) + } +} + +func TestListContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIContainers, 2) + for i, container := range server.containers { + expected[i] = docker.APIContainers{ + ID: container.ID, + Image: container.Image, + Command: strings.Join(container.Config.Cmd, " "), + Created: container.Created.Unix(), + Status: container.State.String(), + Ports: container.NetworkSettings.PortMappingAPI(), + Names: []string{"/" + container.Name}, + } + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListContainers. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestListRunningContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=0", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got)) + } +} + +func TestCreateContainer(t *testing.T) { + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Container + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.containers[0] + if returned.ID != stored.ID { + t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) + } + if stored.State.Running { + t.Errorf("CreateContainer should not set container to running state.") + } + if stored.Config.User != "ubuntu" { + t.Errorf("CreateContainer: wrong config. Expected: %q. 
Returned: %q.", "ubuntu", stored.Config.User) + } + if stored.Config.Hostname != returned.ID[:12] { + t.Errorf("CreateContainer: wrong hostname. Expected: %q. Returned: %q.", returned.ID[:12], stored.Config.Hostname) + } + expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} + if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { + t.Errorf("CreateContainer: wrong host config. Expected: %v. Returned %v.", expectedBind, stored.HostConfig.Binds) + } +} + +func TestCreateContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.cChan = ch + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if notified := <-ch; notified != server.containers[0] { + t.Errorf("CreateContainer: did not notify the proper container. Want %q. Got %q.", server.containers[0].ID, notified.ID) + } +} + +func TestCreateContainerInvalidBody(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---")) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CreateContainer: wrong status. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCreateContainerDuplicateName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.imgIDs = map[string]string{"base": "a1234"} + addContainers(&server, 1) + server.containers[0].Name = "mycontainer" + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create?name=mycontainer", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusConflict { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusConflict, recorder.Code) + } +} + +func TestCreateMultipleContainersEmptyName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + server.imgIDs = map[string]string{"base": "a1234"} + addContainers(&server, 1) + server.containers[0].Name = "" + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. 
Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Container + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.containers[1] + if returned.ID != stored.ID { + t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) + } + if stored.State.Running { + t.Errorf("CreateContainer should not set container to running state.") + } + if stored.Config.User != "ubuntu" { + t.Errorf("CreateContainer: wrong config. Expected: %q. Returned: %q.", "ubuntu", stored.Config.User) + } + expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} + if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { + t.Errorf("CreateContainer: wrong host config. Expected: %v. Returned %v.", expectedBind, stored.HostConfig.Binds) + } +} + +func TestCreateContainerInvalidName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], +"Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create?name=myapp/container1", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } + expectedBody := "Invalid container name\n" + if got := recorder.Body.String(); got != expectedBody { + t.Errorf("CreateContainer: wrong body. Want %q. 
Got %q.", expectedBody, got) + } +} + +func TestCreateContainerImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], +"Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRenameContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + newName := server.containers[0].Name + "abc" + path := fmt.Sprintf("/containers/%s/rename?name=%s", server.containers[0].ID, newName) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RenameContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + container := server.containers[0] + if container.Name != newName { + t.Errorf("RenameContainer: did not rename the container. Want %q. Got %q.", newName, container.Name) + } +} + +func TestRenameContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/containers/blabla/rename?name=something", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RenameContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestCommitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID) + if got := recorder.Body.String(); got != expected { + t.Errorf("CommitContainer: wrong response body. Want %q. Got %q.", expected, got) + } +} + +func TestCommitContainerComplete(t *testing.T) { + server := DockerServer{} + server.imgIDs = make(map[string]string) + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers" + queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}` + request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) + server.ServeHTTP(recorder, request) + image := server.images[0] + if image.Parent != server.containers[0].Image { + t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) + } + if image.Container != server.containers[0].ID { + t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) + } + message := "saving" + if image.Comment != message { + t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment) + } + author := "developers" + if image.Author != author { + t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author) + } + if id := server.imgIDs["tsuru/python"]; id != image.ID { + t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. 
Got %q.", image.ID, id) + } + portSpecs := []string{"22"} + if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) { + t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs) + } + cmd := []string{"cat", "/world"} + if !reflect.DeepEqual(image.Config.Cmd, cmd) { + t.Errorf("CommitContainer: wrong cmd in config. Want %#v. Got %#v.", cmd, image.Config.Cmd) + } +} + +func TestCommitContainerWithTag(t *testing.T) { + server := DockerServer{} + server.imgIDs = make(map[string]string) + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&tag=v1" + request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) + server.ServeHTTP(recorder, request) + image := server.images[0] + if image.Parent != server.containers[0].Image { + t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) + } + if image.Container != server.containers[0].ID { + t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) + } + if id := server.imgIDs["tsuru/python:v1"]; id != image.ID { + t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) + } +} + +func TestCommitContainerInvalidRun(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CommitContainer. Wrong status. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCommitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container=abc123", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestInspectContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := server.containers[0] + var got docker.Container + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Config, expected.Config) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + got.State.StartedAt = expected.State.StartedAt + got.State.FinishedAt = expected.State.FinishedAt + got.Config = expected.Config + got.Created = expected.Created + got.NetworkSettings = expected.NetworkSettings + if !reflect.DeepEqual(got, *expected) { + t.Errorf("InspectContainer: wrong value. Want %#v. 
Got %#v.", *expected, got) + } +} + +func TestInspectContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/abc123/json", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("InspectContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTopContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got docker.TopResult + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Titles, []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}) { + t.Fatalf("TopContainer: Unexpected titles, got: %#v", got.Titles) + } + if len(got.Processes) != 1 { + t.Fatalf("TopContainer: Unexpected process len, got: %d", len(got.Processes)) + } + if got.Processes[0][len(got.Processes[0])-1] != "ls -la .." { + t.Fatalf("TopContainer: Unexpected command name, got: %s", got.Processes[0][len(got.Processes[0])-1]) + } +} + +func TestTopContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/xyz/top", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("TopContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTopContainerStopped(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } +} + +func TestStartContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + memory := int64(536870912) + hostConfig := docker.HostConfig{Memory: memory} + configBytes, err := json.Marshal(hostConfig) + if err != nil { + t.Fatal(err) + } + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer(configBytes)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if !server.containers[0].State.Running { + t.Error("StartContainer: did not set the container to running state") + } + if gotMemory := server.containers[0].HostConfig.Memory; gotMemory != memory { + t.Errorf("StartContainer: wrong HostConfig. Wants %d of memory. 
Got %d", memory, gotMemory) + } +} + +func TestStartContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("{}"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StartContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStartContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/start" + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStartContainerAlreadyRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("StartContainer: wrong status code. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestStopContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Running { + t.Error("StopContainer: did not stop the container") + } +} + +func TestKillContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/kill", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("KillContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Running { + t.Error("KillContainer: did not stop the container") + } +} + +func TestStopContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.containers[1].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StopContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStopContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/stop" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStopContainerNotRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("PauseContainer: wrong status code. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if !server.containers[0].State.Paused { + t.Error("PauseContainer: did not pause the container") + } +} + +func TestPauseContainerAlreadyPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/pause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestUnpauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("UnpauseContainer: wrong status code. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Paused { + t.Error("UnpauseContainer: did not unpause the container") + } +} + +func TestUnpauseContainerNotPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/unpause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestWaitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + go func() { + server.cMut.Lock() + server.containers[0].State.Running = false + server.cMut.Unlock() + }() + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":0}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. 
Got %q.", expected, body) + } +} + +func TestWaitContainerStatus(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + server.containers[0].State.ExitCode = 63 + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":63}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/wait" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestAttachContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + lines := []string{ + fmt.Sprintf("\x01\x00\x00\x00\x03\x00\x00\x00Container %q is running", server.containers[0].ID), + "What happened?", + "Something happened", + } + expected := strings.Join(lines, "\n") + "\n" + if body := recorder.Body.String(); body == expected { + t.Errorf("AttachContainer: wrong body. Want %q. 
Got %q.", expected, body) + } +} + +func TestAttachContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/attach?logs=1" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("AttachContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestRemoveContainerByName(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].Name) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/abc123") + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RemoveContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveContainerRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } + if len(server.containers) < 1 { + t.Error("RemoveContainer: should not remove the container.") + } +} + +func TestRemoveContainerRunningForce(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s?%s", server.containers[0].ID, "force=1") + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestPullImage(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if len(server.images) != 1 { + t.Errorf("PullImage: Want 1 image. 
Got %d.", len(server.images)) + } + if _, ok := server.imgIDs["base"]; !ok { + t.Error("PullImage: Repository should not be empty.") + } +} + +func TestPullImageWithTag(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/create?fromImage=base&tag=tag", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if len(server.images) != 1 { + t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) + } + if _, ok := server.imgIDs["base:tag"]; !ok { + t.Error("PullImage: Repository should not be empty.") + } +} + +func TestPushImage(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestPushImageWithTag(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python:v1": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push?tag=v1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestPushImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PushImage: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestTagImage(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python"] { + t.Errorf("TagImage: did not tag the image") + } +} + +func TestTagImageWithRepoAndTag(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python&tag=v1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python:v1"] { + t.Errorf("TagImage: did not tag the image") + } +} + +func TestTagImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/tag", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("TagImage: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func addContainers(server *DockerServer, n int) { + server.cMut.Lock() + defer server.cMut.Unlock() + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + container := docker.Container{ + Name: fmt.Sprintf("%x", rand.Int()%10000), + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + Path: "ls", + Args: []string{"-la", ".."}, + Config: &docker.Config{ + Hostname: fmt.Sprintf("docker-%d", i), + AttachStdout: true, + AttachStderr: true, + Env: []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)}, + Cmd: []string{"ls", "-la", ".."}, + Image: "base", + }, + State: docker.State{ + Running: false, + Pid: 400 + i, + ExitCode: 0, + StartedAt: date, + }, + Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + NetworkSettings: &docker.NetworkSettings{ + IPAddress: fmt.Sprintf("10.10.10.%d", i+2), + IPPrefixLen: 24, + Gateway: "10.10.10.1", + Bridge: "docker0", + PortMapping: map[string]docker.PortMapping{ + "Tcp": {"8888": fmt.Sprintf("%d", 49600+i)}, + }, + }, + ResolvConfPath: "/etc/resolv.conf", + } + server.containers = append(server.containers, &container) + } +} + +func addImages(server *DockerServer, n int, repo bool) { + server.iMut.Lock() + defer server.iMut.Unlock() + if server.imgIDs == nil { + server.imgIDs = make(map[string]string) + } + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + image := docker.Image{ + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + } + server.images = append(server.images, image) + if repo { + repo := "docker/python-" + image.ID + server.imgIDs[repo] = image.ID + } + } +} + +func TestListImages(t *testing.T) { + server := DockerServer{} + addImages(&server, 2, true) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/images/json?all=1", nil) + server.ServeHTTP(recorder, request) + if 
recorder.Code != http.StatusOK { + t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIImages, 2) + for i, image := range server.images { + expected[i] = docker.APIImages{ + ID: image.ID, + Created: image.Created.Unix(), + RepoTags: []string{"docker/python-" + image.ID}, + } + } + var got []docker.APIImages + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListImages. Want %#v. Got %#v.", expected, got) + } +} + +func TestRemoveImage(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, false) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/images/%s", server.images[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } +} + +func TestRemoveImageByName(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, true) + server.buildMuxer() + recorder := httptest.NewRecorder() + imgName := "docker/python-" + server.images[0].ID + path := "/images/" + imgName + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } + _, ok := server.imgIDs[imgName] + if ok { + t.Error("RemoveImage: did not remove image tag name.") + } +} + +func TestRemoveImageWithMultipleTags(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, true) + server.buildMuxer() + imgID := server.images[0].ID + imgName := "docker/python-" + imgID + server.imgIDs["docker/python-wat"] = imgID + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/images/%s", imgName) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + _, ok := server.imgIDs[imgName] + if ok { + t.Error("RemoveImage: did not remove image tag name.") + } + id, ok := server.imgIDs["docker/python-wat"] + if !ok { + t.Error("RemoveImage: removed the wrong tag name.") + } + if id != imgID { + t.Error("RemoveImage: disassociated the wrong ID from the tag") + } + if len(server.images) < 1 { + t.Fatal("RemoveImage: removed the image, but should keep it") + } + if server.images[0].ID != imgID { + t.Error("RemoveImage: changed the ID of the image!") + } +} + +func TestPrepareFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. 
Got %s.", errorID, recorder.Body.String()) + } +} + +func TestPrepareMultiFailures(t *testing.T) { + server := DockerServer{multiFailures: []map[string]string{}} + server.buildMuxer() + errorID := "multi error" + server.PrepareMultiFailures(errorID, "containers/json") + server.PrepareMultiFailures(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if recorder.Body.String() == errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. 
Got %s.", errorID, recorder.Body.String()) + } +} + +func TestRemoveFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + server.ResetFailure(errorID) + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("RemoveFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestResetMultiFailures(t *testing.T) { + server := DockerServer{multiFailures: []map[string]string{}} + server.buildMuxer() + errorID := "multi error" + server.PrepareMultiFailures(errorID, "containers/json") + server.PrepareMultiFailures(errorID, "containers/json") + if len(server.multiFailures) != 2 { + t.Errorf("PrepareMultiFailures: error adding multi failures.") + } + server.ResetMultiFailures() + if len(server.multiFailures) != 0 { + t.Errorf("ResetMultiFailures: error reseting multi failures.") + } +} + +func TestMutateContainer(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + server.containers = append(server.containers, &docker.Container{ID: "id123"}) + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(server.containers[0].State, state) { + t.Errorf("Wrong state after mutation.\nWant %#v.\nGot %#v.", + state, server.containers[0].State) + } +} + +func TestMutateContainerNotFound(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} 
+ server.buildMuxer() + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err == nil { + t.Error("Unexpected error") + } + if err.Error() != "container not found" { + t.Errorf("wrong error message. Want %q. Got %q.", "container not found", err) + } +} + +func TestBuildImageWithContentTypeTar(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + tarFile, err := os.Open("data/dockerfile.tar") + if err != nil { + t.Fatal(err) + } + defer tarFile.Close() + request, _ := http.NewRequest("POST", "/build?t=teste", tarFile) + request.Header.Add("Content-Type", "application/tar") + server.buildImage(recorder, request) + if recorder.Body.String() == "miss Dockerfile" { + t.Errorf("BuildImage: miss Dockerfile") + return + } + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestBuildImageWithRemoteDockerfile(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil) + server.buildImage(recorder, request) + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestPing(t *testing.T) { + server := DockerServer{} + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/_ping", nil) + server.pingDocker(recorder, request) + if recorder.Body.String() != "" { + t.Errorf("Ping: Unexpected body: %s", recorder.Body.String()) + } + if recorder.Code != http.StatusOK { + t.Errorf("Ping: Expected code %d, got: %d", http.StatusOK, recorder.Code) + } +} + +func TestDefaultHandler(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.listener.Close() + if 
server.mux != server.DefaultHandler() { + t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler()) + } +} + +func TestCreateExecContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + serverExec := server.execs[0] + var got docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if got.ID != serverExec.ID { + t.Errorf("CreateExec: wrong value. Want %#v. Got %#v.", serverExec.ID, got.ID) + } + expected := docker.ExecInspect{ + ID: got.ID, + ProcessConfig: docker.ExecProcessConfig{ + EntryPoint: "bash", + Arguments: []string{"-c", "ls"}, + }, + Container: *server.containers[0], + } + if !reflect.DeepEqual(*serverExec, expected) { + t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, *serverExec) + } +} + +func TestInspectExecContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) + } + var got docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + path = fmt.Sprintf("/exec/%s/json", got.ID) + request, _ = http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got2 docker.ExecInspect + err = json.NewDecoder(recorder.Body).Decode(&got2) + if err != nil { + t.Fatal(err) + } + expected := docker.ExecInspect{ + ID: got.ID, + ProcessConfig: docker.ExecProcessConfig{ + EntryPoint: "bash", + Arguments: []string{"-c", "ls"}, + }, + Container: *server.containers[0], + } + got2.Container.State.StartedAt = expected.Container.State.StartedAt + got2.Container.State.FinishedAt = expected.Container.State.FinishedAt + got2.Container.Config = expected.Container.Config + got2.Container.Created = expected.Container.Created + got2.Container.NetworkSettings = expected.Container.NetworkSettings + if !reflect.DeepEqual(got2, expected) { + t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, got2) + } +} + +func TestStartExecContainer(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) + } + var exec docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&exec) + if err != nil { + t.Fatal(err) + } + unleash := make(chan bool) + server.PrepareExec(exec.ID, func() { + <-unleash + }) + codes := make(chan int, 1) + sent := make(chan bool) + go func() { + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/exec/%s/start", exec.ID) + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + close(sent) + server.ServeHTTP(recorder, request) + codes <- recorder.Code + }() + <-sent + execInfo, err := waitExec(server.URL(), exec.ID, true, 5) + if err != nil { + t.Fatal(err) + } + if !execInfo.Running { + t.Error("StartExec: expected exec to be running, but it's not running") + } + close(unleash) + if code := <-codes; code != http.StatusOK { + t.Errorf("StartExec: wrong status. Want %d. Got %d.", http.StatusOK, code) + } + execInfo, err = waitExec(server.URL(), exec.ID, false, 5) + if err != nil { + t.Fatal(err) + } + if execInfo.Running { + t.Error("StartExec: expected exec to be not running after start returns, but it's running") + } +} + +func TestStartExecContainerWildcardCallback(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Cmd": ["bash", "-c", "ls"]}` + path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) + } + unleash := make(chan bool) + server.PrepareExec("*", func() { + <-unleash + }) + var exec docker.Exec + err := json.NewDecoder(recorder.Body).Decode(&exec) + if err != nil { + t.Fatal(err) + } + codes := make(chan int, 1) + sent := make(chan bool) + go func() { + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/exec/%s/start", exec.ID) + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", path, strings.NewReader(body)) + close(sent) + server.ServeHTTP(recorder, request) + codes <- recorder.Code + }() + <-sent + execInfo, err := waitExec(server.URL(), exec.ID, true, 5) + if err != nil { + t.Fatal(err) + } + if !execInfo.Running { + t.Error("StartExec: expected exec to be running, but it's not running") + } + close(unleash) + if code := <-codes; code != http.StatusOK { + t.Errorf("StartExec: wrong status. Want %d. Got %d.", http.StatusOK, code) + } + execInfo, err = waitExec(server.URL(), exec.ID, false, 5) + if err != nil { + t.Fatal(err) + } + if execInfo.Running { + t.Error("StartExec: expected exec to be not running after start returns, but it's running") + } +} + +func TestStartExecContainerNotFound(t *testing.T) { + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Tty":true}` + request, _ := http.NewRequest("POST", "/exec/something-wat/start", strings.NewReader(body)) + server.ServeHTTP(recorder, request) +} + +func waitExec(url, execID string, running bool, maxTry int) (*docker.ExecInspect, error) { + client, err := docker.NewClient(url) + if err != nil { + return nil, err + } + exec, err := client.InspectExec(execID) + for i := 0; i < maxTry && exec.Running != running && err == nil; i++ { + time.Sleep(100e6) + exec, err = client.InspectExec(exec.ID) + } + return exec, err +} + +func TestStatsContainer(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + 
t.Fatal(err) + } + defer server.Stop() + addContainers(server, 2) + server.buildMuxer() + expected := docker.Stats{} + expected.CPUStats.CPUUsage.TotalUsage = 20 + server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { + return expected + }) + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stats?stream=false", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StatsContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + body := recorder.Body.Bytes() + var got docker.Stats + err = json.Unmarshal(body, &got) + if err != nil { + t.Fatal(err) + } + got.Read = time.Time{} + if !reflect.DeepEqual(got, expected) { + t.Errorf("StatsContainer: wrong value. Want %#v. Got %#v.", expected, got) + } +} + +type safeWriter struct { + sync.Mutex + *httptest.ResponseRecorder +} + +func (w *safeWriter) Write(buf []byte) (int, error) { + w.Lock() + defer w.Unlock() + return w.ResponseRecorder.Write(buf) +} + +func TestStatsContainerStream(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.Stop() + addContainers(server, 2) + server.buildMuxer() + expected := docker.Stats{} + expected.CPUStats.CPUUsage.TotalUsage = 20 + server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { + time.Sleep(50 * time.Millisecond) + return expected + }) + recorder := &safeWriter{ + ResponseRecorder: httptest.NewRecorder(), + } + path := fmt.Sprintf("/containers/%s/stats?stream=true", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + go func() { + server.ServeHTTP(recorder, request) + }() + time.Sleep(200 * time.Millisecond) + recorder.Lock() + defer recorder.Unlock() + body := recorder.Body.Bytes() + parts := bytes.Split(body, []byte("\n")) + if len(parts) < 2 { + t.Errorf("StatsContainer: wrong number of parts. 
Want at least 2. Got %#v.", len(parts)) + } + var got docker.Stats + err = json.Unmarshal(parts[0], &got) + if err != nil { + t.Fatal(err) + } + got.Read = time.Time{} + if !reflect.DeepEqual(got, expected) { + t.Errorf("StatsContainer: wrong value. Want %#v. Got %#v.", expected, got) + } +} + +func addNetworks(server *DockerServer, n int) { + server.netMut.Lock() + defer server.netMut.Unlock() + for i := 0; i < n; i++ { + netid := fmt.Sprintf("%x", rand.Int()%10000) + network := docker.Network{ + Name: netid, + ID: fmt.Sprintf("%x", rand.Int()%10000), + Type: "bridge", + Endpoints: []*docker.Endpoint{ + &docker.Endpoint{ + Name: "blah", + ID: fmt.Sprintf("%x", rand.Int()%10000), + Network: netid, + }, + }, + } + server.networks = append(server.networks, &network) + } +} + +func TestListNetworks(t *testing.T) { + server := DockerServer{} + addNetworks(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/networks", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListNetworks: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.Network, 2) + for i, network := range server.networks { + expected[i] = docker.Network{ + ID: network.ID, + Name: network.Name, + Type: network.Type, + Endpoints: network.Endpoints, + } + } + var got []docker.Network + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListNetworks. Want %#v. 
Got %#v.", expected, got) + } +} + +type createNetworkResponse struct { + ID string `json:"ID"` +} + +func TestCreateNetwork(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + netid := fmt.Sprintf("%x", rand.Int()%10000) + netname := fmt.Sprintf("%x", rand.Int()%10000) + body := fmt.Sprintf(`{"ID": "%s", "Name": "%s", "Type": "bridge" }`, netid, netname) + request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + + var returned createNetworkResponse + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.networks[0] + if returned.ID != stored.ID { + t.Errorf("CreateNetwork: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned) + } +} + +func TestCreateNetworkInvalidBody(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/networks", strings.NewReader("whaaaaaat---")) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCreateNetworkDuplicateName(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + addNetworks(&server, 1) + server.networks[0].Name = "mynetwork" + recorder := httptest.NewRecorder() + body := fmt.Sprintf(`{"ID": "%s", "Name": "mynetwork", "Type": "bridge" }`, fmt.Sprintf("%x", rand.Int()%10000)) + request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusForbidden { + t.Errorf("CreateNetwork: wrong status. Want %d. 
Got %d.", http.StatusForbidden, recorder.Code) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go new file mode 100644 index 000000000..11d571761 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go @@ -0,0 +1,100 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// The content is borrowed from Docker's own source code to provide a simple +// tls based dialer + +package docker + +import ( + "crypto/tls" + "errors" + "net" + "strings" + "time" +) + +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if cwc, ok := c.rawConn.(interface { + CloseWrite() error + }); ok { + return cwc.CloseWrite() + } + return nil +} + +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. 
+ timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + rawConn, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + c := *config + c.ServerName = hostname + config = &c + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 000000000..ba8e0cb3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? 
+ehthumbs.db +Thumbs.db diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 000000000..2f4e3c2f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip + +before_script: + - mysql -e 'create database gotest;' diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 000000000..6fc4c6f7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,44 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Arne Hormann +Carlos Nieto +Chris Moos +DisposaBoy +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +INADA Naoki +James Harr +Jian Zhen +Joshua Prunier +Julien Schmidt +Kamil Dziedzic +Leonardo YongUk Kim +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Runrioter Wung +Soroush Pour +Stan Putrya +Xiaobing Jiang +Xiuming Chen + +# Organizations + +Barracuda Networks, Inc. +Google Inc. +Stripe Inc. 
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 000000000..161ad0fcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,92 @@ +## HEAD + +Changes: + + - Go 1.1 is no longer supported + - Use decimals field from MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + +Bugfixes: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + +New Features: + - Support for returning table alias on Columns() (#289) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 000000000..f87c19824 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +Please provide the following minimum information: +* Your Go-MySQL-Driver version (or git SHA) +* Your Go version (run `go version` in your console) +* A detailed issue description +* Error Log if present +* If possible, a short example + + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Pull Requests Checklist + +Please check the following points before submitting your pull request: +- [x] Code compiles correctly +- [x] Created tests, if possible +- [x] All tests pass +- [x] Extended the README / documentation, if necessary +- [x] Added yourself to the AUTHORS file + +### Code Review + +Everyone is invited to review and comment on pull requests. 
+If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 000000000..14e2f777f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 000000000..6a2bb2ca3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,386 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases) + +[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql) + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. 
No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6 or Unix domain sockets + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.2 or higher + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available. +In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host:port`. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. 
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `collation` + +``` +Type: string +Valid Values: +Default: utf8_general_ci +``` + +Sets the collation used for client-server interaction on connection. 
In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid charsets for a server is retrievable with `SHOW COLLATION`. + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. + +*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. + +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). 
For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` + + +##### `strict` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`strict=true` enables the strict mode in which MySQL warnings are treated as errors. + +By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example. + + +##### `timeout` + +``` +Type: decimal number +Default: OS default +``` + +*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). + + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). 
+ + +##### System Variables + +All other parameters are interpreted as system variables: + * `autocommit`: `"SET autocommit="` + * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone="` + * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation="` + * `param`: `"SET ="` + +*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!* + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +Use the [strict mode](#strict) but ignore notes: +``` +user:password@/dbname?strict=true&sql_notes=false +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +Google Cloud SQL on App Engine: +``` +user@cloudsql(project-id:instance-name)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). 
+ +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. + +See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + +Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. 
+ +See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. + + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** + +Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. 
+ +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 000000000..565614eef --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 000000000..fb8a2f5f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("Warning on %q: %v", query, w) + } else { + b.Fatalf("Error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer 
stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT ?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + 
b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &config{ + interpolateParams: true, + loc: time.UTC, + }, + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 000000000..509ce89e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,136 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import "io" + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. 
+type buffer struct { + buf []byte + rd io.Reader + idx int + length int +} + +func newBuffer(rd io.Reader) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + rd: rd, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + nn, err := b.rd.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. 
+func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 000000000..6c1d613d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation byte = 33 // utf8_general_ci + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, 
+ "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + "ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + 
"utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. 
+var unsafeCollations = map[byte]bool{ + 1: true, // big5_chinese_ci + 13: true, // sjis_japanese_ci + 28: true, // gbk_chinese_ci + 84: true, // big5_bin + 86: true, // gb2312_bin + 87: true, // gbk_bin + 88: true, // sjis_bin + 95: true, // cp932_japanese_ci + 96: true, // cp932_bin +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 000000000..caaae013f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,403 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql/driver" + "errors" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *config + maxPacketAllowed int + maxWriteSize int + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + strict bool +} + +type config struct { + user string + passwd string + net string + addr string + dbname string + params map[string]string + loc *time.Location + tls *tls.Config + timeout time.Duration + collation uint8 + allowAllFiles bool + allowOldPasswords bool + allowCleartextPasswords bool + clientFoundRows bool + columnsWithAlias bool + interpolateParams bool +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err 
= mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // time.Time parsing + case "parseTime": + var isBool bool + mc.parseTime, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Strict mode + case "strict": + var isBool bool + mc.strict, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Compression + case "compress": + err = errors.New("Compression not implemented yet") + return + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + err := mc.exec("START TRANSACTION") + if err == nil { + return &mysqlTx{mc}, err + } + + return nil, err +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if mc.netConn != nil { + err = mc.writeCommandPacket(comQuit) + if err == nil { + err = mc.netConn.Close() + } else { + mc.netConn.Close() + } + mc.netConn = nil + } + + mc.cfg = nil + mc.buf.rd = nil + + return +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, err + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", driver.ErrBadConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) 
+ } else { + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxPacketAllowed { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + 
// try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + // no columns, no more data + return emptyRows{}, nil + } + // Columns + rows.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, err +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 000000000..dddc12908 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,162 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeNewDecimal byte = iota + 0xf6 + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + 
fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 000000000..d310624ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,149 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// This struct is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. 
+type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var dials map[string]DialFunc + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + } + mc.cfg, err = parseDSN(dsn) + if err != nil { + return nil, err + } + + // Connect to Server + if dial, ok := dials[mc.cfg.net]; ok { + mc.netConn, err = dial(mc.cfg.addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.timeout} + mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + mc.buf = newBuffer(mc.netConn) + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.Close() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + + // Read Result Packet + err = mc.readResultOK() + if err != nil { + // Retry with old authentication method, if allowed + if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword { + if err = mc.writeOldAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword { + if err = mc.writeClearAuthPacket(); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else { + mc.Close() + return nil, err + } + + } + + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxPacketAllowed = stringToInt(maxap) - 1 + if mc.maxPacketAllowed < maxPacketSize { + mc.maxWriteSize = mc.maxPacketAllowed + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 000000000..f9da416ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1681 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := parseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + 
db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("Error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) + if err != nil { + dbt.fail("Exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("Query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("Next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("Expected InsertID 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value 
FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("Expected 0 affected row, got %d", count) + } + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + 
v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë ÃrvíztűrÅ‘ ã„ã‚ã¯ã«ã»ã¸ã¨ã¡ã‚Šã¬ã‚‹ã‚’ イロãƒãƒ‹ãƒ›ãƒ˜ãƒˆ דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s != %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. 
" + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" 
+ } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 
0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err error + rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) + if err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) + if err == nil { + rows.Scan(&zeroDateSupported) + rows.Close() + } + for _, setups := range testcases { + if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { + // skip fractional second tests 
if unsupported by server + continue + } + for _, setup := range setups.tests { + allowBinTime := true + if setup.s == "" { + // fill time string whereever Go can reliable produce it + setup.s = setup.t.Format(setups.tlayout) + } else if setup.s[0] == '!' { + // skip tests using setup.t as source in queries + allowBinTime = false + // fix setup.s - remove the "!" + setup.s = setup.s[1:] + } + if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { + // skip disallowed 0000-00-00 date + continue + } + setup.run(dbt, setups.dbtype, setups.tlayout, textString) + setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) + if allowBinTime { + setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) + } + } + } + }) + } +} + +func TestTimestampMicros(t *testing.T) { + format := "2006-01-02 15:04:05.999999" + f0 := format[:19] + f1 := format[:21] + f6 := format[:26] + runTests(t, dsn, func(dbt *DBTest) { + // check if microseconds are supported. + // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width + // and not precision. 
+ // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + microsecsSupported := false + if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + if !microsecsSupported { + // skip test + return + } + _, err := dbt.db.Exec(` + CREATE TABLE test ( + value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', + value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', + value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' + )`, + ) + if err != nil { + dbt.Error(err) + } + defer dbt.mustExec("DROP TABLE IF EXISTS test") + dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) + var res0, res1, res6 string + rows := dbt.mustQuery("SELECT * FROM test") + if !rows.Next() { + dbt.Errorf("test contained no selectable values") + } + err = rows.Scan(&res0, &res1, &res6) + if err != nil { + dbt.Error(err) + } + if res0 != f0 { + dbt.Errorf("expected %q, got %q", f0, res0) + } + if res1 != f1 { + dbt.Errorf("expected %q, got %q", f1, res1) + } + if res6 != f6 { + dbt.Errorf("expected %q, got %q", f6, res6) + } + }) +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("Valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("Invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = 
nullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("Valid NullFloat64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("Invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("Valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("Invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("Valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("Invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil []byte wich should be nil") + } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("Nil []byte wich should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? 
IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("Inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestUint64(t *testing.T) { + const ( + u0 = uint64(0) + uall = ^u0 + uhigh = uall >> 1 + utop = ^uhigh + s0 = int64(0) + sall = ^s0 + shigh = int64(uhigh) + stop = ^shigh + ) + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? 
,?, ?, ?, ?, ?`) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + row := stmt.QueryRow( + u0, uhigh, utop, uall, + s0, shigh, stop, sall, + ) + + var ua, ub, uc, ud uint64 + var sa, sb, sc, sd int64 + + err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) + if err != nil { + dbt.Fatal(err) + } + switch { + case ua != u0, + ub != uhigh, + uc != utop, + ud != uall, + sa != s0, + sb != shigh, + sc != stop, + sd != sall: + dbt.Fatal("Unexpected result value") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: 
no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + verifyLoadDataResult := func() { + rows, err := dbt.db.Query("SELECT * FROM test") + if err != nil { + dbt.Fatal(err.Error()) + } + + i := 0 + values := [4]string{ + "a string", + "a string containing a \t", + "a string containing a \n", + "a string containing both \t\n", + } + + var id int + var value string + + for rows.Next() { + i++ + err = rows.Scan(&id, &value) + if err != nil { + dbt.Fatal(err.Error()) + } + if i != id { + dbt.Fatalf("%d != %d", i, id) + } + if values[i-1] != value { + dbt.Fatalf("%q != %q", values[i-1], value) + } + } + err = rows.Err() + if err != nil { + dbt.Fatal(err.Error()) + } + + if i != 4 { + dbt.Fatalf("Rows count mismatch. Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent file didn't fail") + } else if err.Error() != "Local File 'doesnotexist' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 
1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // => skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, 
queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("Server does not support TLS") + } else { + dbt.Fatalf("Error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("No Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("Error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("Error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("Error closing connection: %s", err.Error()) + } + + defer func() { + if err := 
recover(); err != nil { + t.Errorf("Panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("Unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("Error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("Expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... 
+ _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("Connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("Expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestColumnsWithAlias(t *testing.T) { + runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT 1 AS A") + defer rows.Close() + cols, _ := rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A" { + t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) + } + rows.Close() + + rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") + cols, _ = rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A.one" { + t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if 
expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("Didn't get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("Times don't match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("Unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("Getting row failed") + } else 
{ + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("1st rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("1st rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("2nd rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("Unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("2nd rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", 
err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("Unexpected row on rows2") + } + err = rows2.Close() + if err != nil { + dbt.Fatal(err) + } + } + }) +} + +// Regression test for +// * more than 32 NULL parameters (issue 209) +// * more parameters than fit into the buffer (issue 201) +func TestPreparedManyCols(t *testing.T) { + const numParams = defaultBufSize + runTests(t, dsn, func(dbt *DBTest) { + query := "SELECT ?" + strings.Repeat(",?", numParams-1) + stmt, err := dbt.db.Prepare(query) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + // create more parameters than fit into the buffer + // which will take nil-values + params := make([]interface{}, numParams) + rows, err := stmt.Query(params...) + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("Testing up to %d concurrent connections \r\n", max) + + var remaining, succeeded int32 = int32(max), 0 + + var wg sync.WaitGroup + wg.Add(max) + + var fatalError string + var once sync.Once + fatalf := func(s string, vals ...interface{}) { + once.Do(func() { + fatalError = fmt.Sprintf(s, vals...) 
+ }) + } + + for i := 0; i < max; i++ { + go func(id int) { + defer wg.Done() + + tx, err := dbt.db.Begin() + atomic.AddInt32(&remaining, -1) + + if err != nil { + if err.Error() != "Error 1040: Too many connections" { + fatalf("Error on Conn %d: %s", id, err.Error()) + } + return + } + + // keep the connection busy until all connections are open + for remaining > 0 { + if _, err = tx.Exec("DO 1"); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + } + + if err = tx.Commit(); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + + // everything went fine with this connection + atomic.AddInt32(&succeeded, 1) + }(i) + } + + // wait until all conections are open + wg.Wait() + + if fatalError != "" { + dbt.Fatal(fatalError) + } + + dbt.Logf("Reached %d concurrent connections\r\n", succeeded) + }) +} + +// Tests custom dial functions +func TestCustomDial(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + // our custom dial function which justs wraps net.Dial here + RegisterDial("mydial", func(addr string) (net.Conn, error) { + return net.Dial(prot, addr) + }) + + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname)) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + if _, err = db.Exec("DO 1"); err != nil { + t.Fatalf("Connection failed: %s", err.Error()) + } +} + +func TestSqlInjection(t *testing.T) { + createTest := func(arg string) func(dbt *DBTest) { + return func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v INTEGER)") + dbt.mustExec("INSERT INTO test VALUES (?)", 1) + + var v int + // NULL can't be equal to anything, the idea here is to inject query so it returns row + // This test verifies that escapeQuotes and escapeBackslash are working properly + err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v) + if err == sql.ErrNoRows { + return // 
success, sql injection failed + } else if err == nil { + dbt.Errorf("Sql injection successful with arg: %s", arg) + } else { + dbt.Errorf("Error running query with arg: %s; err: %s", arg, err.Error()) + } + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, createTest("1 OR 1=1")) + runTests(t, testdsn, createTest("' OR '1'='1")) + } +} + +// Test if inserted data is correctly retrieved after being escaped +func TestInsertRetrieveEscapedData(t *testing.T) { + testData := func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") + + // All sequences that are escaped by escapeQuotes and escapeBackslash + v := "foo \x00\n\r\x1a\"'\\" + dbt.mustExec("INSERT INTO test VALUES (?)", v) + + var out string + err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + + if out != v { + dbt.Errorf("%q != %q", out, v) + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, testData) + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 000000000..44cf30db6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,131 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. 
+var ( + ErrInvalidConn = errors.New("Invalid Connection") + ErrMalformPkt = errors.New("Malformed Packet") + ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS") + ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.") + ErrUnknownPlugin = errors.New("The authentication plugin is not supported.") + ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") + ErrPktSync = errors.New("Commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") + ErrBusyBuffer = errors.New("Busy buffer") +) + +var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. 
+func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// MySQLWarnings is an error type which represents a group of one or more MySQL +// warnings +type MySQLWarnings []MySQLWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf( + "%s %s: %s", + warning.Level, + warning.Code, + warning.Message, + ) + } + return msg +} + +// MySQLWarning is an error type which represents a single MySQL warning. +// Warnings are returned in groups only. See MySQLWarnings +type MySQLWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", nil) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + for { + err = rows.Next(values) + switch err { + case nil: + warning := MySQLWarning{} + + if raw, ok := values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok := values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok := values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[0]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 000000000..96f9126d6 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 000000000..a2dedb3c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,164 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "fmt" + "io" + "os" + "strings" +) + +var ( + fileRegister map[string]bool + readerRegister map[string]func() io.Reader +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + delete(fileRegister, strings.Trim(filePath, `"`)) +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. 
+func DeregisterReaderHandler(name string) { + delete(readerRegister, name) +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an an absolute path. See issue #355. + name = name[idx+8:] + + if handler, inMap := readerRegister[name]; inMap { + rdr = handler() + if rdr != nil { + data = make([]byte, 4+mc.maxWriteSize) + + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + if mc.cfg.allowAllFiles || fileRegister[name] { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize { + data = make([]byte, 4+fileSize) + } else if fileSize <= mc.maxPacketAllowed { + data = make([]byte, 4+mc.maxWriteSize) + } else { + err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed) + } + } + } + } else { + err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) + } + } + + // send content packets + if err == nil { + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } else { + mc.readPacket() + } + return err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 000000000..14395bf9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1179 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var payload []byte + for { + // Read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // Packet Length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + if pktLen < 1 { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, driver.ErrBadConn + } + + // Check Packet Sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } else { + return nil, ErrPktSync + } + } + mc.sequence++ + + // Read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + isLastPacket := (pktLen < maxPacketSize) + + // Zero allocations for non-splitting packets + if isLastPacket && payload == nil { + return data, nil + } + + payload = append(payload, data...) 
+ + if isLastPacket { + return payload, nil + } + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxPacketAllowed { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(ErrMalformPkt) + } else { + errLog.Print(err) + } + return driver.ErrBadConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "Unsupported MySQL Protocol Version %d. 
Protocol Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) 
+ + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], cipher) + return b[:], nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + mc.flags&clientLongFlag + + if mc.cfg.clientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.dbname); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + data[12] = mc.cfg.collation + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.rd = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + 23 + + // User [null terminated string] + if len(mc.cfg.user) > 0 { + pos += copy(data[pos:], mc.cfg.user) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.dbname) > 0 { + pos += copy(data[pos:], mc.cfg.dbname) + data[pos] = 0x00 + pos++ + } + + // Assume native client during response + pos += copy(data[pos:], "mysql_native_password") + data[pos] = 0x00 + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the 
buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Client clear text authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeClearAuthPacket() error { + // Calculate the packet length and add a tailing 0 + pktLen := len(mc.cfg.passwd) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the clear password [null terminated string] + copy(data[4:], mc.cfg.passwd) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return mc.handleOkPacket(data) + + case iEOF: + if len(data) > 1 { + plugin := string(data[1:bytes.IndexByte(data, 0x00)]) + if plugin == "mysql_old_password" { + // using old_passwords + return ErrOldPassword + } else if plugin == "mysql_clear_password" { + // using clear text password + return ErrCleartextPassword + } else { + return ErrUnknownPlugin + } + } else { + return ErrOldPassword + } + + default: // Error otherwise + return mc.handleErrorPacket(data) + } + } + return err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + 
case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = statusFlag(data[1+n+m]) | statusFlag(data[1+n+m+1])<<8 + + // warning count [2 bytes] + if !mc.strict { + return nil + } else { + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { + return mc.getWarnings() + } + return nil + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 
5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.columnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + // Length [uint32] + pos += n + 1 + 2 + 4 + + // Field type [uint8] + columns[i].fieldType = data[pos] + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] 
== iEOF && len(data) == 5 { + rows.mc = nil + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + + // No Err and no EOF Packet + if err == nil && data[0] != iEOF { + continue + } + return err // Err or EOF + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } else { + // Check for warnings count > 0, 
only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxPacketAllowed - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "Arguments count mismatch (Got: %d Has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // 
can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = 
append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) 
+ + default: + return fmt.Errorf("Can't convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + rows.mc = nil + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + return io.EOF + } + + // Error otherwise + return rows.mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if 
rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) + default: + var dstlen uint8 + if 
rows.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 000000000..c6438d034 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 000000000..ba606e146 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,106 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + tableName string + name string + flags fieldFlag + fieldType byte + decimals byte +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +type emptyRows struct{} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + if rows.mc.cfg.columnsWithAlias { + for i := range columns { + if tableName := rows.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.columns[i].name + } else { + columns[i] = rows.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.columns[i].name + } + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return ErrInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows emptyRows) Columns() []string { + return nil +} + +func (rows emptyRows) Close() error { + return nil +} + +func (rows emptyRows) Next(dest []driver.Value) error { + return io.EOF +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go 
b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 000000000..6e869b340 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,150 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if 
stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + rows.mc = mc + + if resLen > 0 { + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} + +type converter struct{} + +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return c.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 000000000..33c749b35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 000000000..6a26ad129 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,973 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "net/url" + "strings" + "time" +) + +var ( + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs + + errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset") +) + +func init() { + tlsConfigRegister = make(map[string]*tls.Config) +} + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("Key '%s' is reserved", key) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. 
+func DeregisterTLSConfig(key string) { + delete(tlsConfigRegister, key) +} + +// parseDSN parses the DSN string to a config +func parseDSN(dsn string) (cfg *config, err error) { + // New config with some default values + cfg = &config{ + loc: time.UTC, + collation: defaultCollation, + } + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.passwd = dsn[k+1 : j] + break + } + } + cfg.user = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.addr = dsn[k+1 : i-1] + break + } + } + cfg.net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.dbname = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if cfg.interpolateParams && unsafeCollations[cfg.collation] { + return nil, errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.net == "" { + cfg.net = "tcp" + } + + // Set default address if empty + if cfg.addr == "" { + switch cfg.net { + case "tcp": + cfg.addr = "127.0.0.1:3306" + case "unix": + cfg.addr = "/tmp/mysql.sock" + default: + return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") + } + + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.interpolateParams, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.allowAllFiles, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.allowCleartextPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.allowOldPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + 
cfg.clientFoundRows, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Collation + case "collation": + collation, ok := collations[value] + if !ok { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + err = errors.New("unknown collation") + return + } + cfg.collation = collation + break + + case "columnsWithAlias": + var isBool bool + cfg.columnsWithAlias, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // Dial Timeout + case "timeout": + cfg.timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.tls = &tls.Config{} + } + } else { + if strings.ToLower(value) == "skip-verify" { + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else if tlsConfig, ok := tlsConfigRegister[value]; ok { + if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.addr) + if err == nil { + tlsConfig.ServerName = host + } + } + + cfg.tls = tlsConfig + } else { + return fmt.Errorf("Invalid value / unknown config name: %s", value) + } + } + + default: + // lazy init + if cfg.params == nil { + cfg.params = make(map[string]string) + } + + if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 
1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. 
+func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("Invalid Time-String: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + 
int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += 
"TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ), nil + } +} + 
+/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + switch b[0] { + + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 
2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 000000000..79fbdd1eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,346 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "fmt" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out string + loc *time.Location +}{ + {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:true interpolateParams:false}", time.UTC}, + {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + 
{"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true allowCleartextPasswords:false clientFoundRows:true columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.Local}, + {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 
allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, +} + +func TestDSNParser(t *testing.T) { + var cfg *config + var err error + var res string + + for i, tst := range testDSNs { + cfg, err = parseDSN(tst.in) + if err != nil { + t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + res = fmt.Sprintf("%+v", cfg) + if res != fmt.Sprintf(tst.out, tst.loc) { + t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc)) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "user:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := parseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "user:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := parseDSN(tst) + if err == nil { + t.Errorf("Invalid custom TLS in DSN (%s) but did not error. 
Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Error("Expected %v, Got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := parseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} + +func 
TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if 
isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, 
[]byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000..23a6b1734 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto + make diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go new file mode 100644 index 000000000..88c506cf0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go @@ -0,0 +1,2104 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/gogo/protobuf/proto" + . 
"github.com/gogo/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = 
Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + 
if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) 
+func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. 
+type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. + want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. + want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + wantErr: &RequiredNotSetError{}, + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if _, ok := err.(*RequiredNotSetError); ok { + // We're not in package proto, so we can only assert the type in this case. 
+ err = &RequiredNotSetError{} + } + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + 
pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + 
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + 
"91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. 
+func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 
23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string 
"big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // 
field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. 
+// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. 
+// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. + igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. 
+func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + _ = fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. 
+type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. 
+ F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. + m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil 
{ + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. 
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. + b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err 
:= SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func 
TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. 
+ parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: {F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m) + if err == nil { + t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. 
+ pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) 
{ + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000..57297947b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,217 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsMap); ok { + emOut := out.Addr().Interface().(extensionsMap) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go new file mode 100644 index 000000000..7eef89ee0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go @@ -0,0 +1,245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + + proto3pb "github.com/gogo/protobuf/proto/proto3_proto" + pb "github.com/gogo/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. 
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + 
}, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. + { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: {F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: {F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. + { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..f7b1884b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,832 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// The fundamental decoders that interpret bytes on the wire. 
+// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. 
The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + if ee, ok := e.(extensionsMap); ok { + ext := ee.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + ee.ExtensionMap()[int32(tag)] = ext + } else if ee, ok := e.(extensionsBytes); ok { + ext := ee.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + } + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. 
+ if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. 
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. 
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). 
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. 
+ // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. 
+ if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 000000000..6a77aad76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,175 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. 
+ if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + l := 1 + size := reflect.TypeOf(value).Elem().Size() + if kind == reflect.Array { + l = reflect.TypeOf(value).Elem().Len() + size = reflect.TypeOf(value).Size() + } + total := int(size) * l + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. 
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + setCustomType(newBas, 0, custom) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..91f3f0784 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1293 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. 
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. 
+ return []byte{}, nil + } + return p.buf, err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. 
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). 
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). 
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. 
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. 
+ return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. 
+ o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..f77cfb1ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,354 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. 
+func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. +func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). 
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..d8673a3e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,256 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. 
+ - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := 
v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. 
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go new file mode 100644 index 000000000..ef6048008 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go @@ -0,0 +1,191 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + . "github.com/gogo/protobuf/proto" + pb "github.com/gogo/protobuf/proto/testdata" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. 
+var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. + if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. + if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one 
unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. 
same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..9a6374fdb --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,519 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. 
+// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +type extensionsMap interface { + extendableProto + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + extendableProto + GetExtensions() *[]byte +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. 
+func SetRawExtension(base extendableProto, id int32, b []byte) { + if ebase, ok := base.(extensionsMap); ok { + ebase.ExtensionMap()[id] = Extension{enc: b} + } else if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + } else { + panic("unreachable") + } +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. 
+func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + err := encodeExtension(&e) + if err != nil { + return err + } + m[k] = e + } + return nil +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? 
+ if epb, doki := pb.(extensionsMap); doki { + _, ok := epb.ExtensionMap()[extension.Field] + return ok + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + panic("unreachable") +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +func clearExtension(pb extendableProto, fieldNum int32) { + if epb, doki := pb.(extensionsMap); doki { + delete(epb.ExtensionMap(), fieldNum) + } else if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + } else { + panic("unreachable") + } +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + clearExtension(pb, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present it returns ErrMissingExtension. 
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + if epb, doki := pb.(extensionsMap); doki { + emap := epb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + panic("unreachable") +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. 
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. 
+ if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. 
+ if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + return setExtension(pb, extension, value) +} + +func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsMap); doki { + epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + } else if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + } + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..bd55fb68b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,221 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" +) + +func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return sizeExtensionMap(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + 
sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s 
is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return setExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go new file mode 100644 index 000000000..86e3006d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go @@ -0,0 +1,292 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/gogo/protobuf/proto" + pb "github.com/gogo/protobuf/proto/testdata" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := 
proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). 
+ } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + {pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. 
+ ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. + if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. + valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. 
+ name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: 
if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..d36f9ad12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,841 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. 
+ Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" 
json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + +package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. 
+ } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. 
+func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + break + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + if err != nil { + fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + if err != nil { + fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end 
not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero 
default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + 
case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..a6c2c06b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,40 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..9d912bce1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,287 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. 
+ +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? +} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. 
+ +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) 
// join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. 
+ +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go new file mode 100644 index 000000000..7c29bccf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &MessageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + m := make(map[int32]Extension) + if err := UnmarshalMessageSet(b, m); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := m[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..749919d25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. 
+type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. 
+func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..e9be0fe92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for 
Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. 
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. 
+func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..6bc85fa98 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,108 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + oldHeader := structPointer_GetSliceHeader(base, f) + newLen := oldHeader.Len + 1 + slice := reflect.MakeSlice(typ, newLen, newLen) + bas := toStructPointer(slice) + 
for i := 0; i < oldHeader.Len; i++ { + newElemptr := uintptr(bas) + uintptr(i)*size + oldElemptr := oldHeader.Data + uintptr(i)*size + copyUintPtr(oldElemptr, newElemptr, int(size)) + } + + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..13245c00d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,815 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. 
+const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + 
p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + if len(p.CustomType) > 0 { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = 
size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = 
size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = 
(*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + 
p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. 
+func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. 
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..8daf9f776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,64 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 000000000..2f2da4604 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-gogo. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/gogo/protobuf/proto" +import testdata "github.com/gogo/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,proto3" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,proto3" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,proto3" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return 
proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 000000000..ca670015a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,68 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package proto3_proto; + +import "github.com/gogo/protobuf/proto/testdata/test.proto"; + +message Message { + enum Humour { + UNKNOWN = 0; + PUNS = 1; + SLAPSTICK = 2; + BILL_BAILEY = 3; + } + + string name = 1; + Humour hilarity = 2; + uint32 height_in_cm = 3; + bytes data = 4; + int64 result_count = 7; + bool true_scotsman = 8; + float score = 9; + + repeated uint64 key = 5; + Nested nested = 6; + + map terrain = 10; + testdata.SubDefaults proto2_field = 11; + map proto2_value = 13; +} + +message Nested { + string bunny = 1; +} + +message MessageWithMap { + map byte_mapping = 1; +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go new file mode 100644 index 000000000..6f9cddc3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + pb "github.com/gogo/protobuf/proto/proto3_proto" + tpb "github.com/gogo/protobuf/proto/testdata" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + 
t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} + +func TestProto3SetDefaults(t *testing.T) { + in := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: new(tpb.SubDefaults), + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": new(tpb.SubDefaults), + }, + } + + got := proto.Clone(in).(*pb.Message) + proto.SetDefaults(got) + + // There are no defaults in proto3. Everything should be the zero value, but + // we need to remember to set defaults for nested proto2 messages. + want := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": {N: proto.Int64(7)}, + }, + } + + if !proto.Equal(got, want) { + t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go new file mode 100644 index 000000000..a2729c39a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. 
+ testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go new file mode 100644 index 000000000..457a479eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go @@ -0,0 +1,142 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/gogo/protobuf/proto" + proto3pb "github.com/gogo/protobuf/proto/proto3_proto" + pb "github.com/gogo/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. 
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. 
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: {}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: {F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: {}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + 
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..4fe7e0815 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,117 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile new file mode 100644 index 000000000..1e676c37f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: regenerate + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo/version/protoc-min-version + protoc-min-version --version="3.0.0" --gogo_out=. 
test.proto + diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go new file mode 100644 index 000000000..8e8451537 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--gogo_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. 
+ newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go new file mode 100644 index 000000000..8bc688c2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,2746 @@ +// Code generated by protoc-gen-gogo. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap +*/ +package testdata + +import proto "github.com/gogo/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err 
:= proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var 
RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. 
+ RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` 
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 
`protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, 
\"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" 
json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func 
(m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return 
m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return 
*m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return 
Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + 
return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m 
*GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) 
GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return 
*m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type DefaultsMessage struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} 
+func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) 
ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return 
Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) 
Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) 
GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + 
return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + 
} + return nil +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: 
"zigzag64,108,opt,name=no_default_sint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: 
(*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,def=48", +} + +var E_DefaultFixed64 = 
&proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + 
+var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: 
"bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: 
"testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: 
(*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + 
proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + 
proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden new file mode 100644 index 000000000..0387853d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden @@ -0,0 +1,1737 @@ +// Code generated by protoc-gen-gogo. +// source: test.proto +// DO NOT EDIT! + +package testdata + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" + +import () + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x FOO) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + GoTest_TABLE GoTest_KIND = 11 + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x GoTest_KIND) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return 
err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x MyMessage_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x Defaults_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x 
RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x RepeatedEnum_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return 0 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField 
`protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 
`protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + 
F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 
`protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized 
[]byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return 0 +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + 
+func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) 
GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil 
{ + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) 
GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() 
[]float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type 
GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +type NonPackedTest struct 
{ + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func 
(m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 
Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 
`protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return 0 +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) 
Reset() { *m = MyMessage_SomeGroup{} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m 
*MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" 
json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return 
*m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized 
[]byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { 
+ return m.Strings + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,1,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,1,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + 
proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto new file mode 100644 index 000000000..440dba38d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto @@ -0,0 +1,480 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 
F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes 
= 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + 
+ // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. 
+ +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; + map str_to_str = 4; +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000..365242441 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,804 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := writeEnum(w, v, props); err != nil { + return err + } + } else if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := writeEnum(w, fv, props); err != nil { + return err + } + } else if err := writeAny(w, 
fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil && len(props.CustomType) > 0 { + var custom Marshaler = v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. 
+ var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. + if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. 
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) error { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s 
int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + var m map[int32]Extension + if em, ok := ep.(extensionsMap); ok { + m = em.ExtensionMap() + } else if em, ok := ep.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + } + + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. 
+func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..cdb23373c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,55 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" +) + +func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..9b2fab593 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,815 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i 
< len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. 
+func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { + sprops := GetProperties(st) + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + reqCount := GetProperties(st).reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). 
+ tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + } else { + // This is a normal, non-extension field. + name := tok.value + fi, props, ok := structFieldByName(st, name) + if !ok { + return p.errorf("unknown field name %q in %v", name, st) + } + + dst := sv.Field(fi) + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. 
+// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. 
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
+ return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go new file mode 100644 index 000000000..f1c623192 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go @@ -0,0 +1,511 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/gogo/protobuf/proto" + proto3pb "github.com/gogo/protobuf/proto/proto3_proto" + . 
"github.com/gogo/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + 
out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. + { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, 
+ }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 
[testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. 
+ if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +func TestProto3TextParsing(t *testing.T) { + m := new(proto3pb.Message) + const in = `name: "Wallace" true_scotsman: true` + want := &proto3pb.Message{ + Name: "Wallace", + TrueScotsman: true, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestMapParsing(t *testing.T) { + m := new(MessageWithMap) + const in = `name_mapping: name_mapping:` + + `msg_mapping:,>` + // separating commas are okay + `msg_mapping>` + // no colon after "value" + `byte_mapping:` + want := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Beatles", + 1234: "Feist", + }, + MsgMapping: map[int64]*FloatingPoint{ + -4: {F: Float64(2.0)}, + -2: {F: Float64(4.0)}, + }, + ByteMapping: map[bool][]byte{ + true: []byte("so be it"), + }, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, 
want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. + pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go new file mode 100644 index 000000000..a98504ef3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go @@ -0,0 +1,450 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/gogo/protobuf/proto" + + proto3pb "github.com/gogo/protobuf/proto/proto3_proto" + pb "github.com/gogo/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally 
wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. + b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := 
buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. + m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} + 
+func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. + &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. +// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. 
+type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. + for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + { + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} + +func TestProto3Text(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&proto3pb.Message{}, ``}, + // zero message except for an empty byte slice + {&proto3pb.Message{Data: []byte{}}, ``}, + // trivial case + {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" 
height_in_cm:175`}, + // empty map + {&pb.MessageWithMap{}, ``}, + // non-empty map; map format is the same as a repeated struct, + // and they are sorted by key (numerically for numeric keys). + { + &pb.MessageWithMap{NameMapping: map[int32]string{ + -1: "Negatory", + 7: "Lucky", + 1234: "Feist", + 6345789: "Otis", + }}, + `name_mapping: ` + + `name_mapping: ` + + `name_mapping: ` + + `name_mapping:`, + }, + // map with nil value; not well-defined, but we shouldn't crash + { + &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, + `msg_mapping:`, + }, + } + for _, test := range tests { + got := strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 000000000..f1f06564a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + make diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 000000000..b787d58aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2104 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . 
"github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = 
Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + 
if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) 
+func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. 
+type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. + want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. + want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + wantErr: &RequiredNotSetError{}, + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if _, ok := err.(*RequiredNotSetError); ok { + // We're not in package proto, so we can only assert the type in this case. 
+ err = &RequiredNotSetError{} + } + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + 
pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + 
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + 
"91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. 
+func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 
23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string 
"big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // 
field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. 
+// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. 
+// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. + igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. 
+func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. 
+type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. 
+ F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. + m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil 
{ + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. 
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. + b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err 
:= SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func 
TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. 
+ parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m) + if err == nil { + t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. 
+ pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) 
{ + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..915a68b8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,212 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. 
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 000000000..a1c697bc8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. 
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + 
}, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. + { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. 
+ { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..bf71dcad1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,827 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. 
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. 
+ var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. 
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. 
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). 
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. 
+ // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. 
+ if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..91f3f0784 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1293 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. 
It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. 
+// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? 
If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. 
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). 
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) 
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. 
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..d8673a3e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,256 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. 
+func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. 
To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go new file mode 100644 index 000000000..b322f65ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go @@ -0,0 +1,191 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + . "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. 
+ if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. + if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, 
different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..e591ccef7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,400 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. 
+// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. 
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. 
+ // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 000000000..72552767d --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,292 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) 
+ } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). + } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + 
{pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. + ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. + if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. 
+ valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. 
+ name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: 
if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..95f7975dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,841 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. 
+ Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" 
json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + +package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. 
+ } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. 
+func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + break + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + if err != nil { + fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + if err != nil { + fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end 
not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero 
default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + 
case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..9d912bce1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,287 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? 
+} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? +} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. 
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 000000000..7c29bccf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &MessageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + m := make(map[int32]Extension) + if err := UnmarshalMessageSet(b, m); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := m[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..749919d25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. 
+type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. 
+func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..e9be0fe92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support 
for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. 
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. 
+func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..d74844ab2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,742 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. 
+type valueDecoder func(o *Buffer) (x uint64, err error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. 
+ +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + 
p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); 
t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if 
p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T 
-> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. 
+ propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. 
+ continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 000000000..37c778209 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. 
+ +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + 
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 000000000..e2311d929 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,68 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +import "testdata/test.proto"; + +package proto3_proto; + +message Message { + enum Humour { + UNKNOWN = 0; + PUNS = 1; + SLAPSTICK = 2; + BILL_BAILEY = 3; + } + + string name = 1; + Humour hilarity = 2; + uint32 height_in_cm = 3; + bytes data = 4; + int64 result_count = 7; + bool true_scotsman = 8; + float score = 9; + + repeated uint64 key = 5; + Nested nested = 6; + + map terrain = 10; + testdata.SubDefaults proto2_field = 11; + map proto2_value = 13; +} + +message Nested { + string bunny = 1; +} + +message MessageWithMap { + map byte_mapping = 1; +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go new file mode 100644 index 000000000..462f8055c --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/proto3_proto" + tpb "github.com/golang/protobuf/proto/testdata" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + 
t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} + +func TestProto3SetDefaults(t *testing.T) { + in := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: new(tpb.SubDefaults), + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": new(tpb.SubDefaults), + }, + } + + got := proto.Clone(in).(*pb.Message) + proto.SetDefaults(got) + + // There are no defaults in proto3. Everything should be the zero value, but + // we need to remember to set defaults for nested proto2 messages. + want := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, + }, + } + + if !proto.Equal(got, want) { + t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go new file mode 100644 index 000000000..a2729c39a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. 
+ testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 000000000..db5614fd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,142 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. 
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. 
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue 
+ } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile new file mode 100644 index 000000000..fc288628a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go new file mode 100644 index 000000000..7172d0e96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. 
+ run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. + newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go new file mode 100644 index 000000000..13674a449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,2746 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err 
:= proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var 
RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. 
+ RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` 
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 
`protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, 
\"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" 
json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func 
(m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return 
m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return 
*m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return 
Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + 
return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m 
*GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) 
GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return 
*m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type DefaultsMessage struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} 
+func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) 
ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return 
Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) 
Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) 
GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + 
return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + 
} + return nil +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: 
"zigzag64,108,opt,name=no_default_sint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: 
(*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,def=48", +} + +var E_DefaultFixed64 = 
&proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + 
+var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: 
"bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: 
"testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: 
(*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + 
proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + 
proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto new file mode 100644 index 000000000..440dba38d --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto @@ -0,0 +1,480 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 
F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes 
= 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + 
+ // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. 
+ +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; + map str_to_str = 4; +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..f3db2cf5e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,769 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. 
+ if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. 
+ var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. + if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. 
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } 
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. 
+func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..7d0c75719 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,772 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + 
p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + 
errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < 
n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. 
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { + sprops := GetProperties(st) + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. 
+ break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + reqCount := GetProperties(st).reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + } else { + // This is a normal, non-extension field. + name := tok.value + fi, props, ok := structFieldByName(st, name) + if !ok { + return p.errorf("unknown field name %q in %v", name, st) + } + + dst := sv.Field(fi) + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 000000000..0754b2626 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,511 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . "github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + 
`"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. 
+ { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: 
MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. 
+ if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +func TestProto3TextParsing(t *testing.T) { + m := new(proto3pb.Message) + const in = `name: "Wallace" true_scotsman: true` + want := &proto3pb.Message{ + Name: "Wallace", + TrueScotsman: true, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestMapParsing(t *testing.T) { + m := new(MessageWithMap) + const in = `name_mapping: name_mapping:` + + `msg_mapping:,>` + // separating commas are okay + `msg_mapping>` + // no colon after "value" + `byte_mapping:` + want := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Beatles", + 1234: "Feist", + }, + MsgMapping: map[int64]*FloatingPoint{ + -4: {F: Float64(2.0)}, + -2: {F: Float64(4.0)}, + }, + ByteMapping: map[bool][]byte{ + true: []byte("so be it"), + }, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, 
want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. + pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go new file mode 100644 index 000000000..64579e94d --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go @@ -0,0 +1,450 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One 
normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. + b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } 
+ s := buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. + m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} 
+ +func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. + &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. +// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. 
+type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. + for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + &pb.MessageList_Message{ + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} + +func TestProto3Text(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&proto3pb.Message{}, ``}, + // zero message except for an empty byte slice + {&proto3pb.Message{Data: []byte{}}, ``}, + // trivial case + {&proto3pb.Message{Name: "Rob", 
HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, + // empty map + {&pb.MessageWithMap{}, ``}, + // non-empty map; map format is the same as a repeated struct, + // and they are sorted by key (numerically for numeric keys). + { + &pb.MessageWithMap{NameMapping: map[int32]string{ + -1: "Negatory", + 7: "Lucky", + 1234: "Feist", + 6345789: "Otis", + }}, + `name_mapping: ` + + `name_mapping: ` + + `name_mapping: ` + + `name_mapping:`, + }, + // map with nil value; not well-defined, but we shouldn't crash + { + &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, + `msg_mapping:`, + }, + } + for _, test := range tests { + got := strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS new file mode 100644 index 000000000..824bf2e14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS @@ -0,0 +1,14 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 000000000..9f54f21ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,36 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Russ Cox +Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/Godeps/_workspace/src/github.com/golang/snappy/README new file mode 100644 index 000000000..5074bbab8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/README @@ -0,0 +1,7 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/Godeps/_workspace/src/github.com/golang/snappy/decode.go new file mode 100644 index 000000000..e7f1259a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/decode.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if len(dst) < dLen { + dst = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-1]) + case x == 61: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-2]) | uint(src[s-1])<<8 + case x == 62: + s += 4 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 + case x == 63: + s += 5 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 + } + length = int(x + 1) + if length <= 0 { + return nil, errors.New("snappy: unsupported literal length") + } + if length > len(dst)-d || length > len(src)-s { + return nil, ErrCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) + + case tagCopy2: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(src[s-2]) | int(src[s-1])<<8 + + case tagCopy4: + return nil, errors.New("snappy: unsupported COPY_4 tag") + } + + end := d + length + if offset > d || end > len(dst) { + return nil, ErrCorrupt + } + for ; d < end; d++ { + dst[d] = dst[d-offset] + } + } + if d != dLen { + return nil, ErrCorrupt + } + return dst[:d], nil +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxUncompressedChunkLen), + buf: 
make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), + } +} + +// Reader is an io.Reader than can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4]) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if !r.readFull(r.decoded[:n]) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)]) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
+ if !r.readFull(r.buf[:chunkLen]) { + return 0, r.err + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/Godeps/_workspace/src/github.com/golang/snappy/encode.go new file mode 100644 index 000000000..f3b5484bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/encode.go @@ -0,0 +1,254 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "io" +) + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + case n < 1<<16: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + case n < 1<<24: + dst[0] = 62<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + i = 4 + case int64(n) < 1<<32: + dst[0] = 63<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + dst[4] = uint8(n >> 24) + i = 5 + default: + panic("snappy: source buffer is too long") + } + if copy(dst[i:], lit) != len(lit) { + panic("snappy: destination buffer is too short") + } + return i + len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst []byte, offset, length int) int { + i := 0 + for length > 0 { + x := length - 4 + if 0 <= x && x < 1<<3 && offset < 1<<11 { + dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + i += 2 + break + } + + x = length + if x > 1<<6 { + x = 1 << 6 + } + dst[i+0] = uint8(x-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= x + } + return i +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + if len(src) != 0 { + d += emitLiteral(dst[d:], src) + } + return dst[:d] + } + + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + const maxTableSize = 1 << 14 + shift, tableSize := uint(32-8), 1<<8 + for tableSize < maxTableSize && tableSize < len(src) { + shift-- + tableSize *= 2 + } + var table [maxTableSize]int + + // Iterate over the source bytes. + var ( + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for s+3 < len(src) { + // Update the hash table. + b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] + h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 + p := &table[(h*0x1e35a7bd)>>shift] + // We need to to store values in [-1, inf) in table. To save + // some initialization time, (re)use the table's zero value + // and shift the values against this zero: add 1 on writes, + // subtract 1 on reads. 
+ t, *p = *p-1, s+1 + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { + s++ + continue + } + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + d += emitLiteral(dst[d:], src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s, t = s+4, t+4 + for s < len(src) && src[s] == src[t] { + s++ + t++ + } + // Emit the copied bytes. + d += emitCopy(dst[d:], s-t, s-s0) + lit = s + } + + // Emit any final pending literal bytes and return. + if lit != len(src) { + d += emitLiteral(dst[d:], src[lit:]) + } + return dst[:d] +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +func MaxEncodedLen(srcLen int) int { + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + return 32 + srcLen + srcLen/6 +} + +// NewWriter returns a new Writer that compresses to w, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)), + } +} + +// Writer is an io.Writer than can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + enc []byte + buf [checksumSize + chunkHeaderSize]byte + wroteHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + w.wroteHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (n int, errRet error) { + if w.err != nil { + return 0, w.err + } + if !w.wroteHeader { + copy(w.enc, magicChunk) + if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil { + w.err = err + return n, err + } + w.wroteHeader = true + } + for len(p) > 0 { + var uncompressed []byte + if len(p) > maxUncompressedChunkLen { + uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ chunkType := uint8(chunkTypeCompressedData) + chunkBody := Encode(w.enc, uncompressed) + if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { + chunkType, chunkBody = chunkTypeUncompressedData, uncompressed + } + + chunkLen := 4 + len(chunkBody) + w.buf[0] = chunkType + w.buf[1] = uint8(chunkLen >> 0) + w.buf[2] = uint8(chunkLen >> 8) + w.buf[3] = uint8(chunkLen >> 16) + w.buf[4] = uint8(checksum >> 0) + w.buf[5] = uint8(checksum >> 8) + w.buf[6] = uint8(checksum >> 16) + w.buf[7] = uint8(checksum >> 24) + if _, err := w.w.Write(w.buf[:]); err != nil { + w.err = err + return n, err + } + if _, err := w.w.Write(chunkBody); err != nil { + w.err = err + return n, err + } + n += len(uncompressed) + } + return n, nil +} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go new file mode 100644 index 000000000..e98653acf --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go @@ -0,0 +1,68 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. 
The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer supported. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 bytes". + maxUncompressedChunkLen = 65536 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go new file mode 100644 index 000000000..f8188f11e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go @@ -0,0 +1,377 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snappy + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path/filepath" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdata = flag.String("testdata", "testdata", "Directory containing the test data") +) + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(27354294)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Uint32()) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00") + if _, err := DecodedLen(data); err != ErrCorrupt { + t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) + } + if _, err := Decode(nil, data); err != ErrCorrupt { + t.Errorf("Decode: got %v, want ErrCorrupt", err) + } + + 
// The encoded varint overflows 32 bits + data = []byte("\xff\xff\xff\xff\xff\x00") + + if _, err := DecodedLen(data); err != ErrCorrupt { + t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) + } + if _, err := Decode(nil, data); err != ErrCorrupt { + t.Errorf("Decode: got %v, want ErrCorrupt", err) + } +} + +func cmp(a, b []byte) error { + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxUncompressedChunkLen (64k). + src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestReaderReset(t *testing.T) { + gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000) + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(gold); err != nil { + t.Fatalf("Write: %v", err) + } + encoded, invalid, partial := buf.String(), "invalid", "partial" + r := NewReader(nil) + for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} { + if s == partial { + r.Reset(strings.NewReader(encoded)) + if _, err := r.Read(make([]byte, 101)); err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + continue + } + 
r.Reset(strings.NewReader(s)) + got, err := ioutil.ReadAll(r) + switch s { + case encoded: + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + if err := cmp(got, gold); err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + case invalid: + if err == nil { + t.Errorf("#%d: got nil error, want non-nil", i) + continue + } + } + } +} + +func TestWriterReset(t *testing.T) { + gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000) + var gots, wants [][]byte + const n = 20 + w, failed := NewWriter(nil), false + for i := 0; i <= n; i++ { + buf := new(bytes.Buffer) + w.Reset(buf) + want := gold[:len(gold)*i/n] + if _, err := w.Write(want); err != nil { + t.Errorf("#%d: Write: %v", i, err) + failed = true + continue + } + got, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Errorf("#%d: ReadAll: %v", i, err) + failed = true + continue + } + gots = append(gots, got) + wants = append(wants, want) + } + if failed { + return + } + for i := range gots { + if err := cmp(gots[i], wants[i]); err != nil { + t.Errorf("#%d: %v", i, err) + } + } +} + +func benchDecode(b *testing.B, src []byte) { + encoded := Encode(nil, src) + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Decode(src, encoded) + } +} + +func benchEncode(b *testing.B, src []byte) { + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + dst := make([]byte, MaxEncodedLen(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Encode(dst, src) + } +} + +func readFile(b testing.TB, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + b.Skipf("skipping benchmark: %v", err) + } + if len(src) == 0 { + b.Fatalf("%s has zero length", filename) + } + return src +} + +// expand returns a slice of length n containing repeated copies of src. 
+func expand(src []byte, n int) []byte { + dst := make([]byte, n) + for x := dst; len(x) > 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string +}{ + {"html", "html"}, + {"urls", "urls.10K"}, + {"jpg", "fireworks.jpeg"}, + {"jpg_200", "fireworks.jpeg"}, + {"pdf", "paper-100k.pdf"}, + {"html4", "html_x_4"}, + {"txt1", "alice29.txt"}, + {"txt2", "asyoulik.txt"}, + {"txt3", "lcet10.txt"}, + {"txt4", "plrabn12.txt"}, + {"pb", "geo.protodata"}, + {"gaviota", "kppkn.gtb"}, +} + +// The test data files are present at this canonical URL. 
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" + +func downloadTestdata(b *testing.B, basename string) (errRet error) { + filename := filepath.Join(*testdata, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping benchmark without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create testdata: %s", err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := baseURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, n int, decode bool) { + if err := downloadTestdata(b, testFiles[n].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README new file mode 100644 index 000000000..4d34e87af --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README @@ -0,0 +1,36 @@ +PACKAGE + +package shellquote + import "github.com/kballard/go-shellquote" + + Shellquote provides utilities for joining/splitting strings using sh's + word-splitting rules. 
+ +VARIABLES + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + + +FUNCTIONS + +func Join(args ...string) string + Join quotes each argument and joins them with a space. If passed to + /bin/sh, the resulting string will be split back into the original + arguments. + +func Split(input string) (words []string, err error) + Split splits a string according to /bin/sh's word-splitting rules. It + supports backslash-escapes, single-quotes, and double-quotes. Notably it + does not support the $'' style of quoting. It also doesn't attempt to + perform any other sort of expansion, including brace expansion, shell + expansion, or pathname expansion. + + If the given input has an unterminated quoted string or ends in a + backslash-escape, one of UnterminatedSingleQuoteError, + UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. + + diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go new file mode 100644 index 000000000..9cba3c849 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go @@ -0,0 +1,29 @@ +package shellquote + +import ( + "reflect" + "testing" + "testing/quick" +) + +// this is called bothtest because it tests Split and Join together + +func TestJoinSplit(t *testing.T) { + f := func(strs []string) bool { + // Join, then split, the input + combined := Join(strs...) 
+ split, err := Split(combined) + if err != nil { + t.Logf("Error splitting %#v: %v", combined, err) + return false + } + if !reflect.DeepEqual(strs, split) { + t.Logf("Input %q did not match output %q", strs, split) + return false + } + return true + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go new file mode 100644 index 000000000..9445fa4ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go @@ -0,0 +1,3 @@ +// Shellquote provides utilities for joining/splitting strings using sh's +// word-splitting rules. +package shellquote diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go new file mode 100644 index 000000000..f6cacee0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go @@ -0,0 +1,102 @@ +package shellquote + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Join quotes each argument and joins them with a space. +// If passed to /bin/sh, the resulting string will be split back into the +// original arguments. +func Join(args ...string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} + +const ( + specialChars = "\\'\"`${[|&;<>()*?!" + extraSpecialChars = " \t\n" + prefixChars = "~" +) + +func quote(word string, buf *bytes.Buffer) { + // We want to try to produce a "nice" output. As such, we will + // backslash-escape most characters, but if we encounter a space, or if we + // encounter an extra-special char (which doesn't work with + // backslash-escaping) we switch over to quoting the whole word. 
We do this + // with a space because it's typically easier for people to read multi-word + // arguments when quoted with a space rather than with ugly backslashes + // everywhere. + origLen := buf.Len() + + if len(word) == 0 { + // oops, no content + buf.WriteString("''") + return + } + + cur, prev := word, word + atStart := true + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) { + // copy the non-special chars up to this point + if len(cur) < len(prev) { + buf.WriteString(word[0 : len(prev)-len(cur)-l]) + } + buf.WriteByte('\\') + buf.WriteRune(c) + prev = cur + } else if strings.ContainsRune(extraSpecialChars, c) { + // start over in quote mode + buf.Truncate(origLen) + goto quote + } + atStart = false + } + if len(prev) > 0 { + buf.WriteString(prev) + } + return + +quote: + // quote mode + // Use single-quotes, but if we find a single-quote in the word, we need + // to terminate the string, emit an escaped quote, and start the string up + // again + inQuote := false + for len(word) > 0 { + i := strings.IndexRune(word, '\'') + if i == -1 { + break + } + if i > 0 { + if !inQuote { + buf.WriteByte('\'') + inQuote = true + } + buf.WriteString(word[0:i]) + word = word[i+1:] + } + if inQuote { + buf.WriteByte('\'') + inQuote = false + } + buf.WriteString("\\'") + } + if len(word) > 0 { + if !inQuote { + buf.WriteByte('\'') + } + buf.WriteString(word) + buf.WriteByte('\'') + } +} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go new file mode 100644 index 000000000..a4d2d82fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go @@ -0,0 +1,28 @@ +package shellquote + +import ( + "testing" +) + +func TestSimpleJoin(t *testing.T) { + for _, elem := range simpleJoinTest { + output := Join(elem.input...) 
+ if output != elem.output { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +var simpleJoinTest = []struct { + input []string + output string +}{ + {[]string{"test"}, "test"}, + {[]string{"hello goodbye"}, "'hello goodbye'"}, + {[]string{"hello", "goodbye"}, "hello goodbye"}, + {[]string{"don't you know the dewey decimal system?"}, "'don'\\''t you know the dewey decimal system?'"}, + {[]string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}, "don\\'t you know the dewey decimal system\\?"}, + {[]string{"~user", "u~ser", " ~user", "!~user"}, "\\~user u~ser ' ~user' \\!~user"}, + {[]string{"foo*", "M{ovies,usic}", "ab[cd]", "%3"}, "foo\\* M\\{ovies,usic} ab\\[cd] %3"}, + {[]string{"one", "", "three"}, "one '' three"}, +} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go new file mode 100644 index 000000000..ba3a0f227 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go @@ -0,0 +1,144 @@ +package shellquote + +import ( + "bytes" + "errors" + "strings" + "unicode/utf8" +) + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + +var ( + splitChars = " \n\t" + singleChar = '\'' + doubleChar = '"' + escapeChar = '\\' + doubleEscapeChars = "$`\"\n\\" +) + +// Split splits a string according to /bin/sh's word-splitting rules. It +// supports backslash-escapes, single-quotes, and double-quotes. Notably it does +// not support the $'' style of quoting. It also doesn't attempt to perform any +// other sort of expansion, including brace expansion, shell expansion, or +// pathname expansion. 
+// +// If the given input has an unterminated quoted string or ends in a +// backslash-escape, one of UnterminatedSingleQuoteError, +// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. +func Split(input string) (words []string, err error) { + var buf bytes.Buffer + words = make([]string, 0) + + for len(input) > 0 { + // skip any splitChars at the start + c, l := utf8.DecodeRuneInString(input) + if strings.ContainsRune(splitChars, c) { + input = input[l:] + continue + } + + var word string + word, input, err = splitWord(input, &buf) + if err != nil { + return + } + words = append(words, word) + } + return +} + +func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { + buf.Reset() + +raw: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == singleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto single + } else if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto double + } else if c == escapeChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto escape + } else if strings.ContainsRune(splitChars, c) { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + return buf.String(), cur, nil + } + } + if len(input) > 0 { + buf.WriteString(input) + input = "" + } + goto done + } + +escape: + { + if len(input) == 0 { + return "", "", UnterminatedEscapeError + } + c, l := utf8.DecodeRuneInString(input) + if c == '\n' { + // a backslash-escaped newline is elided from the output entirely + } else { + buf.WriteString(input[:l]) + } + input = input[l:] + } + goto raw + +single: + { + i := strings.IndexRune(input, singleChar) + if i == -1 { + return "", "", UnterminatedSingleQuoteError + } + buf.WriteString(input[0:i]) + input = input[i+1:] + goto raw + } + +double: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == 
doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto raw + } else if c == escapeChar { + // bash only supports certain escapes in double-quoted strings + c2, l2 := utf8.DecodeRuneInString(cur) + cur = cur[l2:] + if strings.ContainsRune(doubleEscapeChars, c2) { + buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) + if c2 == '\n' { + // newline is special, skip the backslash entirely + } else { + buf.WriteRune(c2) + } + input = cur + } + } + } + return "", "", UnterminatedDoubleQuoteError + } + +done: + return buf.String(), input, nil +} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go new file mode 100644 index 000000000..32ea5144b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go @@ -0,0 +1,52 @@ +package shellquote + +import ( + "reflect" + "testing" +) + +func TestSimpleSplit(t *testing.T) { + for _, elem := range simpleSplitTest { + output, err := Split(elem.input) + if err != nil { + t.Errorf("Input %q, got error %#v", elem.input, err) + } else if !reflect.DeepEqual(output, elem.output) { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +func TestErrorSplit(t *testing.T) { + for _, elem := range errorSplitTest { + _, err := Split(elem.input) + if err != elem.error { + t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error) + } + } +} + +var simpleSplitTest = []struct { + input string + output []string +}{ + {"hello", []string{"hello"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"glob* test?", []string{"glob*", "test?"}}, + {"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}}, + {"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}}, + 
{"one '' two", []string{"one", "", "two"}}, + {"text with\\\na newline", []string{"text", "witha", "newline"}}, + {"\"quoted\\d\\\\\\\" text with a\\\nnewline\"", []string{"quoted\\d\\\" text with anewline"}}, + {"foo\"bar\"baz", []string{"foobarbaz"}}, +} + +var errorSplitTest = []struct { + input string + error error +}{ + {"don't worry", UnterminatedSingleQuoteError}, + {"'test'\\''ing", UnterminatedSingleQuoteError}, + {"\"foo'bar", UnterminatedDoubleQuoteError}, + {"foo\\", UnterminatedEscapeError}, +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 000000000..c14d810a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. 
+ - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... 
} + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. 
-benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md new file mode 100644 index 000000000..6c95d1bfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md @@ -0,0 +1,174 @@ +# Codec + +High Performance and Feature-Rich Idiomatic Go Library providing +encode/decode support for different serialization formats. + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. 
+ - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. 
However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Representative Benchmark Results + +A sample run of benchmark using "go test -bi -bench=. -benchmem": + + /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) + + .............................................. + BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT + To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." 
+ Benchmark: + Struct recursive Depth: 1 + ApproxDeepSize Of benchmark Struct: 4694 bytes + Benchmark One-Pass Run: + v-msgpack: len: 1600 bytes + bson: len: 3025 bytes + msgpack: len: 1560 bytes + binc: len: 1187 bytes + gob: len: 1972 bytes + json: len: 2538 bytes + .............................................. + PASS + Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op + Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op + Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op + Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op + Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op + Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op + Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op + Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op + Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op + Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op + Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op + Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op + Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op + Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op + ok ugorji.net/codec 30.827s + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext\_dep\_test.go + diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go new file mode 100644 index 000000000..4d437035e --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go @@ -0,0 +1,319 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
+ +package codec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "reflect" + "runtime" + "testing" + "time" +) + +// Sample way to run: +// go test -bi -bv -bd=1 -benchmem -bench=. + +var ( + _ = fmt.Printf + benchTs *TestStruc + + approxSize int + + benchDoInitBench bool + benchVerify bool + benchUnscientificRes bool = false + //depth of 0 maps to ~400bytes json-encoded string, 1 maps to ~1400 bytes, etc + //For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable. + benchDepth int + benchInitDebug bool + benchCheckers []benchChecker +) + +type benchEncFn func(interface{}) ([]byte, error) +type benchDecFn func([]byte, interface{}) error +type benchIntfFn func() interface{} + +type benchChecker struct { + name string + encodefn benchEncFn + decodefn benchDecFn +} + +func benchInitFlags() { + flag.BoolVar(&benchInitDebug, "bg", false, "Bench Debug") + flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth") + flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init") + flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark") + flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark") +} + +func benchInit() { + benchTs = newTestStruc(benchDepth, true) + approxSize = approxDataSize(reflect.ValueOf(benchTs)) + bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1) + if bytesLen < approxSize { + bytesLen = approxSize + } + + benchCheckers = append(benchCheckers, + benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn}, + benchChecker{"binc-nosym", fnBincNoSymEncodeFn, fnBincNoSymDecodeFn}, + benchChecker{"binc-sym", fnBincSymEncodeFn, fnBincSymDecodeFn}, + benchChecker{"simple", fnSimpleEncodeFn, fnSimpleDecodeFn}, + benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn}, + benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn}, + ) + if benchDoInitBench { + runBenchInit() + } +} + +func 
runBenchInit() { + logT(nil, "..............................................") + logT(nil, "BENCHMARK INIT: %v", time.Now()) + logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, Simple, JSON, GOB, etc), "+ + "use: \"go test -bench=.\"") + logT(nil, "Benchmark: ") + logT(nil, "\tStruct recursive Depth: %d", benchDepth) + if approxSize > 0 { + logT(nil, "\tApproxDeepSize Of benchmark Struct: %d bytes", approxSize) + } + if benchUnscientificRes { + logT(nil, "Benchmark One-Pass Run (with Unscientific Encode/Decode times): ") + } else { + logT(nil, "Benchmark One-Pass Run:") + } + for _, bc := range benchCheckers { + doBenchCheck(bc.name, bc.encodefn, bc.decodefn) + } + logT(nil, "..............................................") + if benchInitDebug { + logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs) + } +} + +func fnBenchNewTs() interface{} { + return new(TestStruc) +} + +func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) { + runtime.GC() + tnow := time.Now() + buf, err := encfn(benchTs) + if err != nil { + logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err) + } + encDur := time.Now().Sub(tnow) + encLen := len(buf) + runtime.GC() + if !benchUnscientificRes { + logT(nil, "\t%10s: len: %d bytes\n", name, encLen) + return + } + tnow = time.Now() + if err = decfn(buf, new(TestStruc)); err != nil { + logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err) + } + decDur := time.Now().Sub(tnow) + logT(nil, "\t%10s: len: %d bytes, encode: %v, decode: %v\n", name, encLen, encDur, decDur) +} + +func fnBenchmarkEncode(b *testing.B, encName string, ts interface{}, encfn benchEncFn) { + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := encfn(ts) + if err != nil { + logT(b, "Error encoding benchTs: %s: %v", encName, err) + b.FailNow() + } + } +} + +func fnBenchmarkDecode(b *testing.B, encName string, ts interface{}, + encfn benchEncFn, decfn benchDecFn, newfn benchIntfFn, +) { + 
buf, err := encfn(ts) + if err != nil { + logT(b, "Error encoding benchTs: %s: %v", encName, err) + b.FailNow() + } + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts = newfn() + if err = decfn(buf, ts); err != nil { + logT(b, "Error decoding into new TestStruc: %s: %v", encName, err) + b.FailNow() + } + if benchVerify { + if vts, vok := ts.(*TestStruc); vok { + verifyTsTree(b, vts) + } + } + } +} + +func verifyTsTree(b *testing.B, ts *TestStruc) { + var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc + ts0 = ts + + if benchDepth > 0 { + ts1m, ts1s = verifyCheckAndGet(b, ts0) + } + + if benchDepth > 1 { + ts2m, ts2s = verifyCheckAndGet(b, ts1m) + } + for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} { + if tsx != nil { + verifyOneOne(b, tsx) + } + } +} + +func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) { + // if len(ts1m.Ms) <= 2 { + // logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms)) + // b.FailNow() + // } + if len(ts0.Its) == 0 { + logT(b, "Error: ts0.Islice len should be > 0. 
Got: %v", len(ts0.Its)) + b.FailNow() + } + ts1m = ts0.Mtsptr["0"] + ts1s = ts0.Its[0] + if ts1m == nil || ts1s == nil { + logT(b, "Error: At benchDepth 1, No *TestStruc found") + b.FailNow() + } + return +} + +func verifyOneOne(b *testing.B, ts *TestStruc) { + if ts.I64slice[2] != int64(3) { + logT(b, "Error: Decode failed by checking values") + b.FailNow() + } +} + +func fnMsgpackEncodeFn(ts interface{}) (bs []byte, err error) { + err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts) + return +} + +func fnMsgpackDecodeFn(buf []byte, ts interface{}) error { + return NewDecoderBytes(buf, testMsgpackH).Decode(ts) +} + +func fnBincEncodeFn(ts interface{}, sym AsSymbolFlag) (bs []byte, err error) { + tSym := testBincH.AsSymbols + testBincH.AsSymbols = sym + err = NewEncoderBytes(&bs, testBincH).Encode(ts) + testBincH.AsSymbols = tSym + return +} + +func fnBincDecodeFn(buf []byte, ts interface{}, sym AsSymbolFlag) (err error) { + tSym := testBincH.AsSymbols + testBincH.AsSymbols = sym + err = NewDecoderBytes(buf, testBincH).Decode(ts) + testBincH.AsSymbols = tSym + return +} + +func fnBincNoSymEncodeFn(ts interface{}) (bs []byte, err error) { + return fnBincEncodeFn(ts, AsSymbolNone) +} + +func fnBincNoSymDecodeFn(buf []byte, ts interface{}) error { + return fnBincDecodeFn(buf, ts, AsSymbolNone) +} + +func fnBincSymEncodeFn(ts interface{}) (bs []byte, err error) { + return fnBincEncodeFn(ts, AsSymbolAll) +} + +func fnBincSymDecodeFn(buf []byte, ts interface{}) error { + return fnBincDecodeFn(buf, ts, AsSymbolAll) +} + +func fnSimpleEncodeFn(ts interface{}) (bs []byte, err error) { + err = NewEncoderBytes(&bs, testSimpleH).Encode(ts) + return +} + +func fnSimpleDecodeFn(buf []byte, ts interface{}) error { + return NewDecoderBytes(buf, testSimpleH).Decode(ts) +} + +func fnGobEncodeFn(ts interface{}) ([]byte, error) { + bbuf := new(bytes.Buffer) + err := gob.NewEncoder(bbuf).Encode(ts) + return bbuf.Bytes(), err +} + +func fnGobDecodeFn(buf []byte, ts interface{}) error { + 
return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts) +} + +func fnJsonEncodeFn(ts interface{}) ([]byte, error) { + return json.Marshal(ts) +} + +func fnJsonDecodeFn(buf []byte, ts interface{}) error { + return json.Unmarshal(buf, ts) +} + +func Benchmark__Msgpack____Encode(b *testing.B) { + fnBenchmarkEncode(b, "msgpack", benchTs, fnMsgpackEncodeFn) +} + +func Benchmark__Msgpack____Decode(b *testing.B) { + fnBenchmarkDecode(b, "msgpack", benchTs, fnMsgpackEncodeFn, fnMsgpackDecodeFn, fnBenchNewTs) +} + +func Benchmark__Binc_NoSym_Encode(b *testing.B) { + fnBenchmarkEncode(b, "binc", benchTs, fnBincNoSymEncodeFn) +} + +func Benchmark__Binc_NoSym_Decode(b *testing.B) { + fnBenchmarkDecode(b, "binc", benchTs, fnBincNoSymEncodeFn, fnBincNoSymDecodeFn, fnBenchNewTs) +} + +func Benchmark__Binc_Sym___Encode(b *testing.B) { + fnBenchmarkEncode(b, "binc", benchTs, fnBincSymEncodeFn) +} + +func Benchmark__Binc_Sym___Decode(b *testing.B) { + fnBenchmarkDecode(b, "binc", benchTs, fnBincSymEncodeFn, fnBincSymDecodeFn, fnBenchNewTs) +} + +func Benchmark__Simple____Encode(b *testing.B) { + fnBenchmarkEncode(b, "simple", benchTs, fnSimpleEncodeFn) +} + +func Benchmark__Simple____Decode(b *testing.B) { + fnBenchmarkDecode(b, "simple", benchTs, fnSimpleEncodeFn, fnSimpleDecodeFn, fnBenchNewTs) +} + +func Benchmark__Gob________Encode(b *testing.B) { + fnBenchmarkEncode(b, "gob", benchTs, fnGobEncodeFn) +} + +func Benchmark__Gob________Decode(b *testing.B) { + fnBenchmarkDecode(b, "gob", benchTs, fnGobEncodeFn, fnGobDecodeFn, fnBenchNewTs) +} + +func Benchmark__Json_______Encode(b *testing.B) { + fnBenchmarkEncode(b, "json", benchTs, fnJsonEncodeFn) +} + +func Benchmark__Json_______Decode(b *testing.B) { + fnBenchmarkDecode(b, "json", benchTs, fnJsonEncodeFn, fnJsonDecodeFn, fnBenchNewTs) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go new file mode 100644 index 
000000000..2bb5e8fee --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go @@ -0,0 +1,786 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "math" + // "reflect" + // "sync/atomic" + "time" + //"fmt" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. + +//var _ = fmt.Printf + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + w encWriter + m map[string]uint16 // symbols + s uint32 // symbols sequencer + b [8]byte +} + +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) encodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + 
e.w.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) encodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: + e.encUint(bincVdPosInt<<4, true, uint64(v)) + case v == -1: + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + default: + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) encodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + switch { + case v == 0: + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + case pos && v >= 1 && v <= 16: + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + case v <= math.MaxUint8: + e.w.writen2(bd|0x0, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.encIntegerPrune(bd, pos, v, 4) + default: + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) encodeArrayPreamble(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) 
encodeMapPreamble(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) + return + case 1: + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writeUint16(ui) + } + } else { + e.s++ + ui = uint16(e.s) + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + switch { + case l <= math.MaxUint8: + // lenprec = 0 + case l <= math.MaxUint16: + lenprec = 1 + case int64(l) <= math.MaxUint32: + lenprec = 2 + default: + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writeUint16(ui) + } + switch lenprec { + case 0: + e.w.writen1(byte(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + 
+func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd | 0x02) + e.w.writeUint32(uint32(v)) + default: + e.w.writen1(bd | 0x03) + e.w.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecDriver struct { + r decReader + bdRead bool + bdType valueType + bd byte + vd byte + vs byte + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) +} + +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + 
default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } + } + return d.bdType +} + +func (d *bincDecDriver) tryDecodeAsNil() bool { + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + case bincFlBin64: + d.decFloatPre(vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + default: + decErr("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 3: + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:])) + case 7: + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) + default: + decErr("unsigned integers with greater than 64 bits of precision not supported") + } + return +} + +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: + ui = d.decUint() + i = int64(ui) + case bincVdNegInt: + ui = d.decUint() + i = -(int64(ui)) + neg = true + case bincVdSmallInt: + i = int64(d.vs) + 1 + ui = uint64(d.vs) + 1 + case bincVdSpecial: + switch d.vs { + case bincSpZero: + //i = 0 + case bincSpNegOne: + neg = true + ui = 1 + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } + default: + decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } + return +} + +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: + d.bdRead = false + switch d.vs { + case bincSpNan: + return math.NaN() + case bincSpPosInf: + return math.Inf(1) + case bincSpZeroFloat, bincSpZero: + return + case bincSpNegInf: + return math.Inf(-1) + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } + case bincVdFloat: + f = d.decFloat() + default: + _, i, _ := d.decIntAny() + f = float64(i) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): + // b = false + case (bincVdSpecial | bincSpTrue): + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) readMapLen() (length int) { + if d.vd != bincVdMap { + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) readArrayLen() (length int) { + if d.vd != bincVdArray { + decErr("Invalid d.vd for array. Expecting 0x%x. 
Got: 0x%x", bincVdArray, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs <= 3 { + return int(d.decUint()) + } + return int(d.vs - 4) +} + +func (d *bincDecDriver) decodeString() (s string) { + switch d.vd { + case bincVdString, bincVdByteArray: + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) + } + case bincVdSymbol: + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint32 + vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) + if vs&0x8 == 0 { + symbol = uint32(d.r.readn1()) + } else { + symbol = uint32(d.r.readUint16()) + } + if d.m == nil { + d.m = make(map[uint32]string, 16) + } + + if vs&0x4 == 0 { + s = d.m[symbol] + } else { + var slen int + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(d.r.readUint16()) + case 2: + slen = int(d.r.readUint32()) + case 3: + slen = int(d.r.readUint64()) + } + s = string(d.r.readn(slen)) + d.m[symbol] = s + } + default: + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + var clen int + switch d.vd { + case bincVdString, bincVdByteArray: + clen = d.decLen() + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + vt = valueTypeNil + case bincSpFalse: + vt = valueTypeBool + v = false + case bincSpTrue: + vt = valueTypeBool + v = true + case bincSpNan: + vt = valueTypeFloat + v = math.NaN() + case bincSpPosInf: + vt = valueTypeFloat + v = math.Inf(1) + case bincSpNegInf: + vt = valueTypeFloat + v = math.Inf(-1) + case bincSpZeroFloat: + vt = valueTypeFloat + v = float64(0) + case bincSpZero: + vt = valueTypeUint + v = int64(0) // int8(0) + case bincSpNegOne: + vt = valueTypeInt + v = int64(-1) // int8(-1) + default: + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + vt = valueTypeUint + v = d.decUint() + case bincVdNegInt: + vt = valueTypeInt + v = -(int64(d.decUint())) + case bincVdFloat: + vt = valueTypeFloat + v = d.decFloat() + case bincVdSymbol: + vt = valueTypeSymbol + v = d.decodeString() + case bincVdString: + vt = valueTypeString + v = d.decodeString() + case 
bincVdByteArray: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case bincVdTimestamp: + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + v = tt + case bincVdCustomExt: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case bincVdArray: + vt = valueTypeArray + decodeFurther = true + case bincVdMap: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
+type BincHandle struct { + BasicHandle +} + +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} +} + +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} +} + +func (_ *BincHandle) writeExt() bool { + return true +} + +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go new file mode 100644 index 000000000..cb184491f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go @@ -0,0 +1,1002 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// Test works by using a slice of interfaces. +// It can test for encoding/decoding into/from a nil interface{} +// or passing the object to encode/decode into. +// +// There are basically 2 main tests here. +// First test internally encodes and decodes things and verifies that +// the artifact was as expected. +// Second test will use python msgpack to create a bunch of golden files, +// read those files, and compare them to what it should be. It then +// writes those files back out and compares the byte streams. +// +// Taken together, the tests are pretty extensive. 
+ +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "io/ioutil" + "math" + "net" + "net/rpc" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strconv" + "sync/atomic" + "testing" + "time" +) + +type testVerifyArg int + +const ( + testVerifyMapTypeSame testVerifyArg = iota + testVerifyMapTypeStrIntf + testVerifyMapTypeIntfIntf + // testVerifySliceIntf + testVerifyForPython +) + +var ( + testInitDebug bool + testUseIoEncDec bool + testStructToArray bool + testWriteNoSymbols bool + + _ = fmt.Printf + skipVerifyVal interface{} = &(struct{}{}) + + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 + timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc) + timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc) + timeToCompare3 = time.Unix(0, 0).UTC() + timeToCompare4 = time.Time{}.UTC() + + table []interface{} // main items we encode + tableVerify []interface{} // we verify encoded things against this after decode + tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) + tablePythonVerify []interface{} // for verifying for python, since Python sometimes + // will encode a float32 as float64, or large int as uint + testRpcInt = new(TestRpcInt) + testMsgpackH = &MsgpackHandle{} + testBincH = &BincHandle{} + testSimpleH = &SimpleHandle{} +) + +func testInitFlags() { + // delete(testDecOpts.ExtFuncs, timeTyp) + flag.BoolVar(&testInitDebug, "tg", false, "Test Debug") + flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") + flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") + flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") +} + +type AnonInTestStruc struct { + AS string + AI64 int64 + AI16 
int16 + AUi64 uint64 + ASslice []string + AI64slice []int64 +} + +type TestStruc struct { + S string + I64 int64 + I16 int16 + Ui64 uint64 + Ui8 uint8 + B bool + By byte + + Sslice []string + I64slice []int64 + I16slice []int16 + Ui64slice []uint64 + Ui8slice []uint8 + Bslice []bool + Byslice []byte + + Islice []interface{} + Iptrslice []*int64 + + AnonInTestStruc + + //M map[interface{}]interface{} `json:"-",bson:"-"` + Ms map[string]interface{} + Msi64 map[string]int64 + + Nintf interface{} //don't set this, so we can test for nil + T time.Time + Nmap map[string]bool //don't set this, so we can test for nil + Nslice []byte //don't set this, so we can test for nil + Nint64 *int64 //don't set this, so we can test for nil + Mtsptr map[string]*TestStruc + Mts map[string]TestStruc + Its []*TestStruc + Nteststruc *TestStruc +} + +type TestABC struct { + A, B, C string +} + +type TestRpcInt struct { + i int +} + +func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } +func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } +func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } +func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { + *res = fmt.Sprintf("%#v", arg) + return nil +} +func (r *TestRpcInt) Echo123(args []string, res *string) error { + *res = fmt.Sprintf("%#v", args) + return nil +} + +func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { + //for python msgpack, + // - all positive integers are unsigned 64-bit ints + // - all floats are float64 + switch iv := v.(type) { + case int8: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int16: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int32: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int64: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case uint8: + v2 = uint64(iv) + case uint16: + v2 = uint64(iv) + 
case uint32: + v2 = uint64(iv) + case uint64: + v2 = uint64(iv) + case float32: + v2 = float64(iv) + case float64: + v2 = float64(iv) + case []interface{}: + m2 := make([]interface{}, len(iv)) + for j, vj := range iv { + m2[j] = testVerifyVal(vj, arg) + } + v2 = m2 + case map[string]bool: + switch arg { + case testVerifyMapTypeSame: + m2 := make(map[string]bool) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + } + case map[string]interface{}: + switch arg { + case testVerifyMapTypeSame: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + } + case map[interface{}]interface{}: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) + } + v2 = m2 + case time.Time: + switch arg { + case testVerifyForPython: + if iv2 := iv.UnixNano(); iv2 > 0 { + v2 = uint64(iv2) + } else { + v2 = int64(iv2) + } + default: + v2 = v + } + default: + v2 = v + } + return +} + +func testInit() { + gob.Register(new(TestStruc)) + if testInitDebug { + ts0 := newTestStruc(2, false) + fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) + } + + testBincH.StructToArray = testStructToArray + if testWriteNoSymbols { + testBincH.AsSymbols = AsSymbolNone + } else { + testBincH.AsSymbols = AsSymbolAll + } + testMsgpackH.StructToArray = testStructToArray + testMsgpackH.RawToString = true + 
// testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) + // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) + timeEncExt := func(rv reflect.Value) ([]byte, error) { + return encodeTime(rv.Interface().(time.Time)), nil + } + timeDecExt := func(rv reflect.Value, bs []byte) error { + tt, err := decodeTime(bs) + if err == nil { + rv.Set(reflect.ValueOf(tt)) + } + return err + } + + // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. + testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) + testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) + + primitives := []interface{}{ + int8(-8), + int16(-1616), + int32(-32323232), + int64(-6464646464646464), + uint8(192), + uint16(1616), + uint32(32323232), + uint64(6464646464646464), + byte(192), + float32(-3232.0), + float64(-6464646464.0), + float32(3232.0), + float64(6464646464.0), + false, + true, + nil, + "someday", + "", + "bytestring", + timeToCompare1, + timeToCompare2, + timeToCompare3, + timeToCompare4, + } + mapsAndStrucs := []interface{}{ + map[string]bool{ + "true": true, + "false": false, + }, + map[string]interface{}{ + "true": "True", + "false": false, + "uint16(1616)": uint16(1616), + }, + //add a complex combo map in here. (map has list which has map) + //note that after the first thing, everything else should be generic. + map[string]interface{}{ + "list": []interface{}{ + int16(1616), + int32(32323232), + true, + float32(-3232.0), + map[string]interface{}{ + "TRUE": true, + "FALSE": false, + }, + []interface{}{true, false}, + }, + "int32": int32(32323232), + "bool": true, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890", + }, + map[interface{}]interface{}{ + true: "true", + uint8(138): false, + "false": uint8(200), + }, + newTestStruc(0, false), + } + + table = []interface{}{} + table = append(table, primitives...) 
//0-19 are primitives + table = append(table, primitives) //20 is a list of primitives + table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct + + tableVerify = make([]interface{}, len(table)) + tableTestNilVerify = make([]interface{}, len(table)) + tablePythonVerify = make([]interface{}, len(table)) + + lp := len(primitives) + av := tableVerify + for i, v := range table { + if i == lp+3 { + av[i] = skipVerifyVal + continue + } + //av[i] = testVerifyVal(v, testVerifyMapTypeSame) + switch v.(type) { + case []interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + case map[string]interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + case map[interface{}]interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + default: + av[i] = v + } + } + + av = tableTestNilVerify + for i, v := range table { + if i > lp+3 { + av[i] = skipVerifyVal + continue + } + av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) + } + + av = tablePythonVerify + for i, v := range table { + if i > lp+3 { + av[i] = skipVerifyVal + continue + } + av[i] = testVerifyVal(v, testVerifyForPython) + } + + tablePythonVerify = tablePythonVerify[:24] +} + +func testUnmarshal(v interface{}, data []byte, h Handle) error { + if testUseIoEncDec { + return NewDecoder(bytes.NewBuffer(data), h).Decode(v) + } + return NewDecoderBytes(data, h).Decode(v) +} + +func testMarshal(v interface{}, h Handle) (bs []byte, err error) { + if testUseIoEncDec { + var buf bytes.Buffer + err = NewEncoder(&buf, h).Encode(v) + bs = buf.Bytes() + return + } + err = NewEncoderBytes(&bs, h).Encode(v) + return +} + +func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { + if bs, err = testMarshal(v, h); err != nil { + logT(t, "Error encoding %s: %v, Err: %v", name, v, err) + t.FailNow() + } + return +} + +func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { + if err = testUnmarshal(v, data, h); err != nil 
{ + logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) + t.FailNow() + } + return +} + +func newTestStruc(depth int, bench bool) (ts *TestStruc) { + var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 + + ts = &TestStruc{ + S: "some string", + I64: math.MaxInt64 * 2 / 3, // 64, + I16: 16, + Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it + Ui8: 160, + B: true, + By: 5, + + Sslice: []string{"one", "two", "three"}, + I64slice: []int64{1, 2, 3}, + I16slice: []int16{4, 5, 6}, + Ui64slice: []uint64{137, 138, 139}, + Ui8slice: []uint8{210, 211, 212}, + Bslice: []bool{true, false, true, false}, + Byslice: []byte{13, 14, 15}, + + Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, + + Ms: map[string]interface{}{ + "true": "true", + "int64(9)": false, + }, + Msi64: map[string]int64{ + "one": 1, + "two": 2, + }, + T: timeToCompare1, + AnonInTestStruc: AnonInTestStruc{ + AS: "A-String", + AI64: 64, + AI16: 16, + AUi64: 64, + ASslice: []string{"Aone", "Atwo", "Athree"}, + AI64slice: []int64{1, 2, 3}, + }, + } + //For benchmarks, some things will not work. + if !bench { + //json and bson require string keys in maps + //ts.M = map[interface{}]interface{}{ + // true: "true", + // int8(9): false, + //} + //gob cannot encode nil in element in array (encodeArray: nil element) + ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} + // ts.Iptrslice = nil + } + if depth > 0 { + depth-- + if ts.Mtsptr == nil { + ts.Mtsptr = make(map[string]*TestStruc) + } + if ts.Mts == nil { + ts.Mts = make(map[string]TestStruc) + } + ts.Mtsptr["0"] = newTestStruc(depth, bench) + ts.Mts["0"] = *(ts.Mtsptr["0"]) + ts.Its = append(ts.Its, ts.Mtsptr["0"]) + } + return +} + +// doTestCodecTableOne allows us test for different variations based on arguments passed. 
+func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, + vs []interface{}, vsVerify []interface{}) { + //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. + //Current setup allows us test (at least manually) the nil interface or typed interface. + logT(t, "================ TestNil: %v ================\n", testNil) + for i, v0 := range vs { + logT(t, "..............................................") + logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) + b0, err := testMarshalErr(v0, h, t, "v0") + if err != nil { + continue + } + logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) + + var v1 interface{} + + if testNil { + err = testUnmarshal(&v1, b0, h) + } else { + if v0 != nil { + v0rt := reflect.TypeOf(v0) // ptr + rv1 := reflect.New(v0rt) + err = testUnmarshal(rv1.Interface(), b0, h) + v1 = rv1.Elem().Interface() + // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() + } + } + + logT(t, " v1 returned: %T, %#v", v1, v1) + // if v1 != nil { + // logT(t, " v1 returned: %T, %#v", v1, v1) + // //we always indirect, because ptr to typed value may be passed (if not testNil) + // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() + // } + if err != nil { + logT(t, "-------- Error: %v. 
Partial return: %v", err, v1) + failT(t) + continue + } + v0check := vsVerify[i] + if v0check == skipVerifyVal { + logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) + continue + } + + if err = deepEqual(v0check, v1); err == nil { + logT(t, "++++++++ Before and After marshal matched\n") + } else { + logT(t, "-------- Before and After marshal do not match: Error: %v"+ + " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) + failT(t) + } + } +} + +func testCodecTableOne(t *testing.T, h Handle) { + // func TestMsgpackAllExperimental(t *testing.T) { + // dopts := testDecOpts(nil, nil, false, true, true), + + switch v := h.(type) { + case *MsgpackHandle: + var oldWriteExt, oldRawToString bool + oldWriteExt, v.WriteExt = v.WriteExt, true + oldRawToString, v.RawToString = v.RawToString, true + doTestCodecTableOne(t, false, h, table, tableVerify) + v.WriteExt, v.RawToString = oldWriteExt, oldRawToString + default: + doTestCodecTableOne(t, false, h, table, tableVerify) + } + // func TestMsgpackAll(t *testing.T) { + idxTime, numPrim, numMap := 19, 23, 4 + + //skip []interface{} containing time.Time + doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) + doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) + // func TestMsgpackNilStringMap(t *testing.T) { + var oldMapType reflect.Type + v := h.getBasicHandle() + oldMapType, v.MapType = v.MapType, mapStrIntfTyp + + //skip time.Time, []interface{} containing time.Time, last map, and newStruc + doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) + doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) + + v.MapType = oldMapType + + // func TestMsgpackNilIntf(t *testing.T) { + + //do newTestStruc and last element of map + doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) + //TODO? What is this one? 
+ //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) +} + +func testCodecMiscOne(t *testing.T, h Handle) { + b, err := testMarshalErr(32, h, t, "32") + // Cannot do this nil one, because faster type assertion decoding will panic + // var i *int32 + // if err = testUnmarshal(b, i, nil); err == nil { + // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") + // t.FailNow() + // } + var i2 int32 = 0 + err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") + if i2 != int32(32) { + logT(t, "------- didn't unmarshal to 32: Received: %d", i2) + t.FailNow() + } + + // func TestMsgpackDecodePtr(t *testing.T) { + ts := newTestStruc(0, false) + b, err = testMarshalErr(ts, h, t, "pointer-to-struct") + if len(b) < 40 { + logT(t, "------- Size must be > 40. Size: %d", len(b)) + t.FailNow() + } + logT(t, "------- b: %v", b) + ts2 := new(TestStruc) + err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") + if ts2.I64 != math.MaxInt64*2/3 { + logT(t, "------- Unmarshal wrong. Expect I64 = 64. Got: %v", ts2.I64) + t.FailNow() + } + + // func TestMsgpackIntfDecode(t *testing.T) { + m := map[string]int{"A": 2, "B": 3} + p := []interface{}{m} + bs, err := testMarshalErr(p, h, t, "p") + + m2 := map[string]int{} + p2 := []interface{}{m2} + err = testUnmarshalErr(&p2, bs, h, t, "&p2") + + if m2["A"] != 2 || m2["B"] != 3 { + logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) + t.FailNow() + } + // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) + checkEqualT(t, p, p2, "p=p2") + checkEqualT(t, m, m2, "m=m2") + if err = deepEqual(p, p2); err == nil { + logT(t, "p and p2 match") + } else { + logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) + t.FailNow() + } + if err = deepEqual(m, m2); err == nil { + logT(t, "m and m2 match") + } else { + logT(t, "Not Equal: %v. 
m: %v, m2: %v", err, m, m2) + t.FailNow() + } + + // func TestMsgpackDecodeStructSubset(t *testing.T) { + // test that we can decode a subset of the stream + mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} + bs, err = testMarshalErr(mm, h, t, "mm") + type ttt struct { + A uint8 + C int32 + } + var t2 ttt + testUnmarshalErr(&t2, bs, h, t, "t2") + t3 := ttt{5, 333} + checkEqualT(t, t2, t3, "t2=t3") + + // println(">>>>>") + // test simple arrays, non-addressable arrays, slices + type tarr struct { + A int64 + B [3]int64 + C []byte + D [3]byte + } + var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} + // test both pointer and non-pointer (value) + for _, tarr1 := range []interface{}{tarr0, &tarr0} { + bs, err = testMarshalErr(tarr1, h, t, "tarr1") + var tarr2 tarr + testUnmarshalErr(&tarr2, bs, h, t, "tarr2") + checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") + // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) + } + + // test byte array, even if empty (msgpack only) + if h == testMsgpackH { + type ystruct struct { + Anarray []byte + } + var ya = ystruct{} + testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") + } +} + +func testCodecEmbeddedPointer(t *testing.T, h Handle) { + type Z int + type A struct { + AnInt int + } + type B struct { + *Z + *A + MoreInt int + } + var z Z = 4 + x1 := &B{&z, &A{5}, 6} + bs, err := testMarshalErr(x1, h, t, "x1") + // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) + var x2 = new(B) + err = testUnmarshalErr(x2, bs, h, t, "x2") + err = checkEqualT(t, x1, x2, "x1=x2") + _ = err +} + +func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, +) (port int) { + // rpc needs EOF, which is sent via a panic, and so must be recovered. + if !recoverPanicToErr { + logT(t, "EXPECTED. 
set recoverPanicToErr=true, since rpc needs EOF") + t.FailNow() + } + srv := rpc.NewServer() + srv.Register(testRpcInt) + ln, err := net.Listen("tcp", "127.0.0.1:0") + // log("listener: %v", ln.Addr()) + checkErrT(t, err) + port = (ln.Addr().(*net.TCPAddr)).Port + // var opts *DecoderOptions + // opts := testDecOpts + // opts.MapType = mapStrIntfTyp + // opts.RawToString = false + serverExitChan := make(chan bool, 1) + var serverExitFlag uint64 = 0 + serverFn := func() { + for { + conn1, err1 := ln.Accept() + // if err1 != nil { + // //fmt.Printf("accept err1: %v\n", err1) + // continue + // } + if atomic.LoadUint64(&serverExitFlag) == 1 { + serverExitChan <- true + conn1.Close() + return // exit serverFn goroutine + } + if err1 == nil { + var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) + srv.ServeCodec(sc) + } + } + } + + clientFn := func(cc rpc.ClientCodec) { + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + var up, sq, mult int + var rstr string + // log("Calling client") + checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) + // log("Called TestRpcInt.Update") + checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") + checkEqualT(t, up, 5, "up=5") + checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) + checkEqualT(t, sq, 25, "sq=25") + checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) + checkEqualT(t, mult, 100, "mult=100") + checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") + checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") + } + + connFn := func() (bs net.Conn) { + // log("calling f1") + bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) + //fmt.Printf("f1. 
bs: %v, err2: %v\n", bs, err2) + checkErrT(t, err2) + return + } + + exitFn := func() { + atomic.StoreUint64(&serverExitFlag, 1) + bs := connFn() + <-serverExitChan + bs.Close() + // serverExitChan <- true + } + + go serverFn() + runtime.Gosched() + //time.Sleep(100 * time.Millisecond) + if exitSleepMs == 0 { + defer ln.Close() + defer exitFn() + } + if doRequest { + bs := connFn() + cc := rr.ClientCodec(bs, h) + clientFn(cc) + } + if exitSleepMs != 0 { + go func() { + defer ln.Close() + time.Sleep(exitSleepMs) + exitFn() + }() + } + return +} + +// Comprehensive testing that generates data encoded from python msgpack, +// and validates that our code can read and write it out accordingly. +// We keep this unexported here, and put actual test in ext_dep_test.go. +// This way, it can be excluded by excluding file completely. +func doTestMsgpackPythonGenStreams(t *testing.T) { + logT(t, "TestPythonGenStreams") + tmpdir, err := ioutil.TempDir("", "golang-msgpack-test") + if err != nil { + logT(t, "-------- Unable to create temp directory\n") + t.FailNow() + } + defer os.RemoveAll(tmpdir) + logT(t, "tmpdir: %v", tmpdir) + cmd := exec.Command("python", "msgpack_test.py", "testdata", tmpdir) + //cmd.Stdin = strings.NewReader("some input") + //cmd.Stdout = &out + var cmdout []byte + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running msgpack_test.py testdata. Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + + oldMapType := testMsgpackH.MapType + for i, v := range tablePythonVerify { + testMsgpackH.MapType = oldMapType + //load up the golden file based on number + //decode it + //compare to in-mem object + //encode it again + //compare to output stream + logT(t, "..............................................") + logT(t, " Testing: #%d: %T, %#v\n", i, v, v) + var bss []byte + bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+".golden")) + if err != nil { + logT(t, "-------- Error reading golden file: %d. 
Err: %v", i, err) + failT(t) + continue + } + testMsgpackH.MapType = mapStrIntfTyp + + var v1 interface{} + if err = testUnmarshal(&v1, bss, testMsgpackH); err != nil { + logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) + failT(t) + continue + } + if v == skipVerifyVal { + continue + } + //no need to indirect, because we pass a nil ptr, so we already have the value + //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } + if err = deepEqual(v, v1); err == nil { + logT(t, "++++++++ Objects match") + } else { + logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) + logT(t, "-------- AGAINST: %#v", v) + logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) + failT(t) + } + bsb, err := testMarshal(v1, testMsgpackH) + if err != nil { + logT(t, "Error encoding to stream: %d: Err: %v", i, err) + failT(t) + continue + } + if err = deepEqual(bsb, bss); err == nil { + logT(t, "++++++++ Bytes match") + } else { + logT(t, "???????? Bytes do not match. %v.", err) + xs := "--------" + if reflect.ValueOf(v).Kind() == reflect.Map { + xs = " " + logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) + } else { + logT(t, "%s It's not a map. 
They should match.", xs) + failT(t) + } + logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) + logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) + } + } + testMsgpackH.MapType = oldMapType +} + +// To test MsgpackSpecRpc, we test 3 scenarios: +// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) +// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) +// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) +// +// This allows us test the different calling conventions +// - Go Service requires only one argument +// - Python Service allows multiple arguments + +func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { + openPort := "6789" + cmd := exec.Command("python", "msgpack_test.py", "rpc-server", openPort, "2") + checkErrT(t, cmd.Start()) + time.Sleep(100 * time.Millisecond) // time for python rpc server to start + bs, err2 := net.Dial("tcp", ":"+openPort) + checkErrT(t, err2) + cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + var rstr string + checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") + var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} + checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) + checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") +} + +func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { + port := doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) + //time.Sleep(1000 * time.Millisecond) + cmd := exec.Command("python", "msgpack_test.py", "rpc-client-go-service", strconv.Itoa(port)) + var cmdout []byte + var err error + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running msgpack_test.py rpc-client-go-service. 
Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + checkEqualT(t, string(cmdout), + fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") +} + +func TestBincCodecsTable(t *testing.T) { + testCodecTableOne(t, testBincH) +} + +func TestBincCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testBincH) +} + +func TestBincCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testBincH) +} + +func TestSimpleCodecsTable(t *testing.T) { + testCodecTableOne(t, testSimpleH) +} + +func TestSimpleCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testSimpleH) +} + +func TestSimpleCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testSimpleH) +} + +func TestMsgpackCodecsTable(t *testing.T) { + testCodecTableOne(t, testMsgpackH) +} + +func TestMsgpackCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testMsgpackH) +} + +func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testMsgpackH) +} + +func TestBincRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testBincH, true, 0) +} + +func _TestSimpleRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testSimpleH, true, 0) +} + +func TestMsgpackRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testMsgpackH, true, 0) +} + +func TestMsgpackRpcSpec(t *testing.T) { + doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) +} + +// TODO: +// Add Tests for: +// - decoding empty list/map in stream into a nil slice/map +// - binary(M|Unm)arsher support for time.Time diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go new file mode 100644 index 000000000..87bef2b93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go @@ -0,0 +1,1048 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
+ +package codec + +import ( + "io" + "reflect" + // "runtime/debug" +) + +// Some tagging information for error messages. +const ( + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + readn(n int) []byte + readb([]byte) + readn1() uint8 + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 +} + +type decDriver interface { + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int +} + +type DecodeOptions struct { + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + // ErrorIfNoField controls whether an error is returned when decoding a map + // from a codec stream into a struct, and no matching struct field is found. 
+ ErrorIfNoField bool +} + +// ------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + r io.Reader + br io.ByteReader + x [8]byte //temp byte array re-used internally for efficiency +} + +func (z *ioDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + bs = make([]byte, n) + if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { + panic(err) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { + panic(err) + } +} + +func (z *ioDecReader) readn1() uint8 { + if z.br != nil { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + return b + } + z.readb(z.x[:1]) + return z.x[0] +} + +func (z *ioDecReader) readUint16() uint16 { + z.readb(z.x[:2]) + return bigen.Uint16(z.x[:2]) +} + +func (z *ioDecReader) readUint32() uint32 { + z.readb(z.x[:4]) + return bigen.Uint32(z.x[:4]) +} + +func (z *ioDecReader) readUint64() uint64 { + z.readb(z.x[:8]) + return bigen.Uint64(z.x[:8]) +} + +// ------------------------------------ + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available +} + +func (z *bytesDecReader) consume(n int) (oldcursor int) { + if z.a == 0 { + panic(io.EOF) + } + if n > z.a { + decErr("Trying to read %v bytes. 
Only %v available", n, z.a) + } + // z.checkAvailable(n) + oldcursor = z.c + z.c = oldcursor + n + z.a = z.a - n + return +} + +func (z *bytesDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + c0 := z.consume(n) + bs = z.b[c0:z.c] + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readn(len(bs))) +} + +func (z *bytesDecReader) readn1() uint8 { + c0 := z.consume(1) + return z.b[c0] +} + +// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits +// creating temp slice variable and copying it to helper function is expensive +// for just 2 bits. + +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +} + +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) +} + +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) +} + +// ------------------------------------ + +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + 
if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryUnmarshaler) + } + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + if !rv.IsNil() { + f.d.decodeValue(rv.Elem()) + return + } + // nil interface: + // use some hieristics to set the nil interface to an + // appropriate value based on the first byte read (byte descriptor bd) + v, vt, decodeFurther := f.dd.decodeNaked() + if vt == valueTypeNil { + return + } + // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) + // if non-nil value in stream. + if num := f.ti.rt.NumMethod(); num > 0 { + decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", + f.ti.rt, num) + } + var rvn reflect.Value + var useRvn bool + switch vt { + case valueTypeMap: + if f.d.h.MapType == nil { + var m2 map[interface{}]interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.MapType).Elem() + useRvn = true + } + case valueTypeArray: + if f.d.h.SliceType == nil { + var m2 []interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.SliceType).Elem() + useRvn = true + } + case valueTypeExt: + re := v.(*RawExt) + var bfn func(reflect.Value, []byte) error + rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) + if bfn == nil { + rvn = reflect.ValueOf(*re) + } else if fnerr := bfn(rvn, re.Data); fnerr != nil { + panic(fnerr) + } + rv.Set(rvn) + return + } + if decodeFurther { + if useRvn { + f.d.decodeValue(rvn) + } else if v != nil { + // this v is a pointer, so we need to dereference it when done + f.d.decode(v) + rvn = reflect.ValueOf(v).Elem() + useRvn = true + } + } + if useRvn { + rv.Set(rvn) + } else if v != nil { + rv.Set(reflect.ValueOf(v)) + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { + containerLen := f.dd.readMapLen() + if containerLen == 0 { + return + } + tisfi := 
fti.sfi + for j := 0; j < containerLen; j++ { + // var rvkencname string + // ddecode(&rvkencname) + f.dd.initReadNext() + rvkencname := f.dd.decodeString() + // rvksi := ti.getForEncName(rvkencname) + if k := fti.indexForEncName(rvkencname); k > -1 { + sfik := tisfi[k] + if sfik.i != -1 { + f.d.decodeValue(rv.Field(int(sfik.i))) + } else { + f.d.decEmbeddedField(rv, sfik.is) + } + // f.d.decodeValue(ti.field(k, rv)) + } else { + if f.d.h.ErrorIfNoField { + decErr("No matching struct field found when decoding stream map with key: %v", + rvkencname) + } else { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } + } else if currEncodedType == valueTypeArray { + containerLen := f.dd.readArrayLen() + if containerLen == 0 { + return + } + for j, si := range fti.sfip { + if j == containerLen { + break + } + if si.i != -1 { + f.d.decodeValue(rv.Field(int(si.i))) + } else { + f.d.decEmbeddedField(rv, si.is) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } else { + decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", + currEncodedType) + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. 
+ currEncodedType := f.dd.currentEncodedType() + + switch currEncodedType { + case valueTypeBytes, valueTypeString: + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { + rv.SetBytes(bs2) + } + return + } + } + + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case intfSliceTypId: + f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) + return + case uint64SliceTypId: + f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) + return + case int64SliceTypId: + f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) + return + case strSliceTypId: + f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) + return + } + } + + containerLen, containerLenS := decContLens(f.dd, currEncodedType) + + // an array can never return a nil slice. so no need to check f.array here. + + if rv.IsNil() { + rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) + } + + if containerLen == 0 { + return + } + + if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { + if f.array { // !rv.CanSet() + decErr(msgDecCannotExpandArr, rvcap, containerLenS) + } + rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) + if rvlen > 0 { + reflect.Copy(rvn, rv) + } + rv.Set(rvn) + } else if containerLenS > rvlen { + rv.SetLen(containerLenS) + } + + for j := 0; j < containerLenS; j++ { + f.d.decodeValue(rv.Index(j)) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case mapStrIntfTypId: + f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) + return + case mapIntfIntfTypId: + f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) 
+ return + case mapInt64IntfTypId: + f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) + return + case mapUint64IntfTypId: + f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) + return + } + } + + containerLen := f.dd.readMapLen() + + if rv.IsNil() { + rv.Set(reflect.MakeMap(f.ti.rt)) + } + + if containerLen == 0 { + return + } + + ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + for j := 0; j < containerLen; j++ { + rvk := reflect.New(ktype).Elem() + f.d.decodeValue(rvk) + + // special case if a byte array. + // if ktype == intfTyp { + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(string(rvk.Bytes())) + } + } + rvv := rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } + + f.d.decodeValue(rvv) + rv.SetMapIndex(rvk, rvv) + } +} + +// ---------------------------------------- + +type decFn struct { + i *decFnInfo + f func(*decFnInfo, reflect.Value) +} + +// A Decoder reads and decodes an object from an input stream in the codec format. +type Decoder struct { + r decReader + d decDriver + h *BasicHandle + f map[uintptr]decFn + x []uintptr + s []decFn +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + z := ioDecReader{ + r: r, + } + z.br, _ = r.(io.ByteReader) + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. 
+func NewDecoderBytes(in []byte, h Handle) *Decoder { + z := bytesDecReader{ + b: in, + a: len(in), + } + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. 
+// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +func (d *Decoder) decode(iv interface{}) { + d.d.initReadNext() + + switch v := iv.(type) { + case nil: + decErr("Cannot decode into nil.") + + case reflect.Value: + d.chkPtrValue(v) + d.decodeValue(v.Elem()) + + case *string: + *v = d.d.decodeString() + case *bool: + *v = d.d.decodeBool() + case *int: + *v = int(d.d.decodeInt(intBitsize)) + case *int8: + *v = int8(d.d.decodeInt(8)) + case *int16: + *v = int16(d.d.decodeInt(16)) + case *int32: + *v = int32(d.d.decodeInt(32)) + case *int64: + *v = d.d.decodeInt(64) + case *uint: + *v = uint(d.d.decodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.decodeUint(8)) + case *uint16: + *v = uint16(d.d.decodeUint(16)) + case *uint32: + *v = uint32(d.d.decodeUint(32)) + case *uint64: + *v = d.d.decodeUint(64) + case *float32: + *v = float32(d.d.decodeFloat(true)) + case *float64: + *v = d.d.decodeFloat(false) + case *[]byte: + *v, _ = d.d.decodeBytes(*v) + + case *[]interface{}: + d.decSliceIntf(v, valueTypeInvalid, false) + case *[]uint64: + d.decSliceUint64(v, valueTypeInvalid, false) + case *[]int64: + d.decSliceInt64(v, valueTypeInvalid, false) + case *[]string: + d.decSliceStr(v, valueTypeInvalid, false) + 
case *map[string]interface{}: + d.decMapStrIntf(v) + case *map[interface{}]interface{}: + d.decMapIntfIntf(v) + case *map[uint64]interface{}: + d.decMapUint64Intf(v) + case *map[int64]interface{}: + d.decMapInt64Intf(v) + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem()) + + default: + rv := reflect.ValueOf(iv) + d.chkPtrValue(rv) + d.decodeValue(rv.Elem()) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value) { + d.d.initReadNext() + + if d.d.tryDecodeAsNil() { + // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) + if rv.Kind() == reflect.Ptr { + if !rv.IsNil() { + rv.Set(reflect.Zero(rv.Type())) + } + return + } + // for rv.Kind() == reflect.Ptr { + // rv = rv.Elem() + // } + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var fn decFn + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i, v := range d.x { + if v == rtid { + fn, ok = d.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new dec fn for type: %v\n", rt) + fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} + fn.i = &fi + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. 
+ // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. + if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = 
append(d.s, fn) + d.x = append(d.x, rtid) + } + } + + fn.f(fn.i, rv) + + return +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + if !rv.IsValid() { + decErr("Cannot decode into a zero (ie invalid) reflect.Value") + } + if !rv.CanInterface() { + decErr("Cannot decode into a value without an interface: %v", rv) + } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", + rv.Kind(), rvi, rvi) +} + +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) + } + d.decodeValue(rv) +} + +// -------------------------------------------------- + +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, 
cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. 
+ if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +// ---------------------------------------- + +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) + } + return +} + +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) 
+} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go new file mode 100644 index 000000000..4914be0c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go @@ -0,0 +1,1001 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" +) + +const ( + // Some tagging information for error messages. + msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. 
+type encWriter interface { + writeUint16(uint16) + writeUint32(uint32) + writeUint64(uint64) + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + isBuiltinType(rt uintptr) bool + encodeBuiltin(rt uintptr, v interface{}) + encodeNil() + encodeInt(i int64) + encodeUint(i uint64) + encodeBool(b bool) + encodeFloat32(f float32) + encodeFloat64(f float64) + encodeExtPreamble(xtag byte, length int) + encodeArrayPreamble(length int) + encodeMapPreamble(length int) + encodeString(c charEncoding, v string) + encodeSymbol(v string) + encodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) +} + +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map. + StructToArray bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. 
+ // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + _, err = o.w.Write([]byte{c}) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + return o.w.Write([]byte(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeUint16(v uint16) { + bigen.PutUint16(z.x[:2], v) + z.writeb(z.x[:2]) +} + +func (z *ioEncWriter) writeUint32(v uint32) { + bigen.PutUint32(z.x[:4], v) + z.writeb(z.x[:4]) +} + +func (z *ioEncWriter) writeUint64(v uint64) { + bigen.PutUint64(z.x[:8], v) + z.writeb(z.x[:8]) +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + encErr("write: Incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(s), n) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) +} + +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) +} + +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. 
+ // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } + +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + 
f.ee.encodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) + return + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) + return + } + } + + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + return + } + + l := rv.Len() + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". + // So we have to duplicate the functionality here. 
+ // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) + + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return + } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() + } else { + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) + } + } + f.ee.encodeStringBytes(c_RAW, bs) + return + } + + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + encnames = make([]string, newlen) + } + newlen = 0 + for _, si := range tisfi { + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } + if toMap { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + continue + } + encnames[newlen] = si.encName + } else { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil + } + } + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + if toMap { + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) + } + e.encodeValue(rvals[j]) + } + } else { + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) + } + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.ee.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + f.e.encodeValue(rv.Elem()) +} + +func (f *encFnInfo) kMap(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case mapIntfIntfTypId: + f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) + return + case mapStrIntfTypId: + f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) + return + case mapStrStrTypId: + f.e.encMapStrStr(rv.Interface().(map[string]string)) + return + case mapInt64IntfTypId: + f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) + return + case mapUint64IntfTypId: + f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) + return + } + } + + l := rv.Len() + f.ee.encodeMapPreamble(l) + if l == 0 { + return + } + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + keyTypeIsString := f.ti.rt.Key() == stringTyp + var asSymbols bool + if keyTypeIsString { + asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if keyTypeIsString { + if asSymbols { + f.ee.encodeSymbol(mks[j].String()) + } else { + f.ee.encodeString(c_UTF8, mks[j].String()) + } + } else { + f.e.encodeValue(mks[j]) + } + f.e.encodeValue(rv.MapIndex(mks[j])) + } + +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. 
+type encFn struct { + i *encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + w encWriter + e encDriver + h *BasicHandle + hh Handle + f map[uintptr]encFn + x []uintptr + s []encFn +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + ww, ok := w.(ioEncWriterWriter) + if !ok { + sww := simpleIoEncWriterWriter{w: w} + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + ww = &sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + z := ioEncWriter{ + w: ww, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. +func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + z := bytesEncWriter{ + b: in, + out: out, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// Encode writes an object into a stream in the codec format. +// +// Encoding can be configured via the "codec" struct tag for the fields. +// +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. +// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. 
Each exported struct field is encoded unless: +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the codec tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. +// +// Examples: +// +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. 
+func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +func (e *Encoder) encode(iv interface{}) { + switch v := iv.(type) { + case nil: + e.e.encodeNil() + + case reflect.Value: + e.encodeValue(v) + + case string: + e.e.encodeString(c_UTF8, v) + case bool: + e.e.encodeBool(v) + case int: + e.e.encodeInt(int64(v)) + case int8: + e.e.encodeInt(int64(v)) + case int16: + e.e.encodeInt(int64(v)) + case int32: + e.e.encodeInt(int64(v)) + case int64: + e.e.encodeInt(v) + case uint: + e.e.encodeUint(uint64(v)) + case uint8: + e.e.encodeUint(uint64(v)) + case uint16: + e.e.encodeUint(uint64(v)) + case uint32: + e.e.encodeUint(uint64(v)) + case uint64: + e.e.encodeUint(v) + case float32: + e.e.encodeFloat32(v) + case float64: + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) + case []uint8: + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) + + case *string: + e.e.encodeString(c_UTF8, *v) + case *bool: + e.e.encodeBool(*v) + case *int: + e.e.encodeInt(int64(*v)) + case *int8: + e.e.encodeInt(int64(*v)) + case *int16: + e.e.encodeInt(int64(*v)) + case *int32: + e.e.encodeInt(int64(*v)) + case *int64: + e.e.encodeInt(*v) + case *uint: + e.e.encodeUint(uint64(*v)) + case *uint8: + e.e.encodeUint(uint64(*v)) + case *uint16: + e.e.encodeUint(uint64(*v)) + case *uint32: + e.e.encodeUint(uint64(*v)) + case *uint64: + e.e.encodeUint(*v) + case *float32: + e.e.encodeFloat32(*v) + case *float64: + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) 
+ case *[]uint64: + e.encSliceUint64(*v) + case *[]uint8: + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) + + default: + e.encodeValue(reflect.ValueOf(iv)) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + e.e.encodeNil() + return + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = 
(*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) + } + } + + fn.f(fn.i, rv) + +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return + } + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) + } else { + e.e.encodeStringBytes(c_RAW, re.Data) + } +} + +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) + } +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) + } +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) + } +} + +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) + } +} + +func (e *Encoder) 
encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } +} + +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) + } +} + +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go new file mode 100644 index 000000000..bdf448d52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go @@ -0,0 +1,75 @@ +// //+build ignore + +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// This file includes benchmarks which have dependencies on 3rdparty +// packages (bson and vmihailenco/msgpack) which must be installed locally. +// +// To run the benchmarks including these 3rdparty packages, first +// - Uncomment first line in this file (put // // in front of it) +// - Get those packages: +// go get github.com/vmihailenco/msgpack +// go get labix.org/v2/mgo/bson +// - Run: +// go test -bi -bench=. 
+ +import ( + "testing" + + vmsgpack "gopkg.in/vmihailenco/msgpack.v2" + "labix.org/v2/mgo/bson" +) + +func init() { + benchCheckers = append(benchCheckers, + benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn}, + benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn}, + ) +} + +func fnVMsgpackEncodeFn(ts interface{}) ([]byte, error) { + return vmsgpack.Marshal(ts) +} + +func fnVMsgpackDecodeFn(buf []byte, ts interface{}) error { + return vmsgpack.Unmarshal(buf, ts) +} + +func fnBsonEncodeFn(ts interface{}) ([]byte, error) { + return bson.Marshal(ts) +} + +func fnBsonDecodeFn(buf []byte, ts interface{}) error { + return bson.Unmarshal(buf, ts) +} + +func Benchmark__Bson_______Encode(b *testing.B) { + fnBenchmarkEncode(b, "bson", benchTs, fnBsonEncodeFn) +} + +func Benchmark__Bson_______Decode(b *testing.B) { + fnBenchmarkDecode(b, "bson", benchTs, fnBsonEncodeFn, fnBsonDecodeFn, fnBenchNewTs) +} + +func Benchmark__VMsgpack___Encode(b *testing.B) { + fnBenchmarkEncode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn) +} + +func Benchmark__VMsgpack___Decode(b *testing.B) { + fnBenchmarkDecode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn, fnVMsgpackDecodeFn, fnBenchNewTs) +} + +func TestMsgpackPythonGenStreams(t *testing.T) { + doTestMsgpackPythonGenStreams(t) +} + +func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { + doTestMsgpackRpcSpecGoClientToPythonSvc(t) +} + +func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { + doTestMsgpackRpcSpecPythonClientToGoSvc(t) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go new file mode 100644 index 000000000..e6dc0563f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go @@ -0,0 +1,589 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
+ +package codec + +// Contains code shared by both encode and decode. + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +const ( + structTagName = "codec" + + // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // This constant flag will enable or disable it. + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. 
+ recoverPanicToErr = true +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + valueTypeInvalid = 0xff +) + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) + + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = 
reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = 
uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + extHandle + EncodeOptions + DecodeOptions +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} + +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. 
+func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return + // } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } + } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil +} + +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn + } + return +} + +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn + } + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn + } + return +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. 
+ + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") + } + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. 
+ base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } + + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } + + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + 
ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return +} + +func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ { + f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. 
+ // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) +} + +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } +} + +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { + return + } + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go new file mode 100644 index 000000000..58417da95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -0,0 +1,127 @@ +// Copyright 
(c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) + } + return +} + +func isEmptyValueDeref(v reflect.Value, deref bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValueDeref(v.Elem(), deref) + } else { + return v.IsNil() + } + case reflect.Struct: + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. 
+ // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !isEmptyValueDeref(v.Field(i), deref) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return isEmptyValueDeref(v, true) +} + +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) + } +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go new file mode 100644 index 000000000..da0500d19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -0,0 +1,816 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
+ +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. + +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. 
+type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + w encWriter + h *MsgpackHandle +} + +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. + return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: + e.w.writen2(mpInt8, byte(i)) + case i >= math.MinInt16: + e.w.writen1(mpInt16) + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: + e.w.writen1(mpInt32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpInt64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: + e.w.writen2(mpUint8, byte(i)) + case i <= math.MaxUint16: + e.w.writen1(mpUint16) + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: + e.w.writen1(mpUint32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpUint64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) encodeFloat32(f float32) { + 
e.w.writen1(mpFloat) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) encodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + switch { + case l == 1: + e.w.writen2(mpFixExt1, xtag) + case l == 2: + e.w.writen2(mpFixExt2, xtag) + case l == 4: + e.w.writen2(mpFixExt4, xtag) + case l == 8: + e.w.writen2(mpFixExt8, xtag) + case l == 16: + e.w.writen2(mpFixExt16, xtag) + case l < 256: + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + case l < 65536: + e.w.writen1(mpExt16) + e.w.writeUint16(uint16(l)) + e.w.writen1(xtag) + default: + e.w.writen1(mpExt32) + e.w.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) encodeMapPreamble(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: + e.w.writen1(ct.bFixMin | byte(l)) + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + e.w.writen2(ct.b8, uint8(l)) + case l < 65536: + e.w.writen1(ct.b16) + e.w.writeUint16(uint16(l)) + default: + e.w.writen1(ct.b32) + e.w.writeUint32(uint32(l)) + 
} +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + r decReader + h *MsgpackHandle + bd byte + bdRead bool + bdType valueType +} + +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. + return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + bd := d.bd + + switch bd { + case mpNil: + vt = valueTypeNil + d.bdRead = false + case mpFalse: + vt = valueTypeBool + v = false + case mpTrue: + vt = valueTypeBool + v = true + + case mpFloat: + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) + + case mpUint8: + vt = valueTypeUint + v = uint64(d.r.readn1()) + case mpUint16: + vt = valueTypeUint + v = uint64(d.r.readUint16()) + case mpUint32: + vt = valueTypeUint + v = uint64(d.r.readUint32()) + case mpUint64: + vt = valueTypeUint + v = uint64(d.r.readUint64()) + + case mpInt8: + vt = valueTypeInt + v = int64(int8(d.r.readn1())) + case mpInt16: + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) + case mpInt32: + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) + case mpInt64: + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + vt = valueTypeInt + v = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= 
mpNegFixNumMax: + // negative fixnum + vt = valueTypeInt + v = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm + } else { + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + } + decodeFurther = true + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + vt = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + vt = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + clen := d.readExtLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt + default: + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(d.r.readUint16())) + case mpUint32: + i = int64(uint64(d.r.readUint32())) + case mpUint64: + i = int64(d.r.readUint64()) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(d.r.readUint16())) + case mpInt32: + i = int64(int32(d.r.readUint32())) + case mpInt64: + i = int64(d.r.readUint64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 
0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(d.r.readUint16()) + case mpUint32: + ui = uint64(d.r.readUint32()) + case mpUint64: + ui = d.r.readUint64() + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(d.r.readUint16())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(d.r.readUint32())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(d.r.readUint64()); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = 
math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: + // b = false + case mpTrue, 1: + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) + } + d.bdRead = false + return +} + +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + var clen int + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) + } + // if clen < 0 { + // changed = true + // panic("length cannot be zero. this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. 
+func (d *msgpackDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + } + return d.bdType +} + +func (d *msgpackDecDriver) tryDecodeAsNil() bool { + if d.bd == mpNil { + d.bdRead = false + return true + } + return false +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + switch { + case bd == mpNil: + clen = -1 // to represent nil + case bd == ct.b8: + clen = int(d.r.readn1()) + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == ct.bFixMin: + clen = int(ct.bFixMin ^ bd) + default: + 
decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) readMapLen() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) readArrayLen() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(d.r.readUint16()) + case mpExt32: + clen = int(d.r.readUint32()) + default: + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + xbd := d.bd + switch { + case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + xbs = []byte(d.decodeString()) + default: + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. 
+ // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool +} + +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} +} + +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt +} + +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. 
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.cls { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. 
Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py new file mode 100644 index 000000000..e933838c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +import msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464646464.0, + False, + True, + None, + "someday", + "", + "bytestring", + 1328176922000002000, + -2206187877999998000, + 0, + -6795364578871345152 + ] + l1 = [ + { "true": True, + "false": False }, + { "true": "True", + "false": False, + "uint16(1616)": 1616 }, + { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], + "int32":32323232, "bool": True, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890" }, + { True: "true", 8: False, "false": 0 } + ] + + l = [] + l.extend(l0) + l.append(l0) + l.extend(l1) + return l + +def build_test_data(destdir): + l = get_test_data_list() + for i in range(len(l)): + packer = msgpack.Packer() + serialized = packer.pack(l[i]) + f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') + f.write(serialized) + f.close() + +def doRpcServer(port, stopTimeSec): + class EchoHandler(object): + def Echo123(self, msg1, msg2, msg3): + return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) + def EchoStruct(self, msg): + return ("%s" % msg) + + addr = msgpackrpc.Address('localhost', port) + server = msgpackrpc.Server(EchoHandler()) + server.listen(addr) + # run thread to stop it after stopTimeSec seconds if > 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, 
" <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: msgpack_test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go new file mode 100644 index 000000000..d014dbdcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accomodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. 
+type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + cls bool +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.cls { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + } + if doFlush && c.bw != nil { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.cls { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. 
+ if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) Close() error { + if c.cls { + return io.EOF + } + c.cls = true + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
+var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go new file mode 100644 index 000000000..9e4d148a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import "math" + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + h *SimpleHandle + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) encodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) encodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) encodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) encodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, uint8(v)) + case v <= math.MaxUint16: + e.w.writen1(bd + 1) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd + 2) + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: + e.w.writen1(bd + 3) + e.w.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + switch { + case length == 0: + e.w.writen1(bd) + case length <= math.MaxUint8: + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + case length <= math.MaxUint16: + e.w.writen1(bd + 2) + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: + e.w.writen1(bd + 3) + e.w.writeUint32(uint32(length)) + default: + e.w.writen1(bd + 4) + e.w.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + 
e.w.writen1(xtag) +} + +func (e *simpleEncDriver) encodeArrayPreamble(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) encodeMapPreamble(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + h *SimpleHandle + r decReader + bdRead bool + bdType valueType + bd byte + //b [8]byte +} + +func (d *simpleDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType 
= valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } + } + return d.bdType +} + +func (d *simpleDecDriver) tryDecodeAsNil() bool { + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + i = int64(ui) + case simpleVdPosInt + 1: + ui = uint64(d.r.readUint16()) + i = int64(ui) + case simpleVdPosInt + 2: + ui = uint64(d.r.readUint32()) + i = int64(ui) + case simpleVdPosInt + 3: + ui = uint64(d.r.readUint64()) + i = int64(ui) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 1: + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 2: + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 3: + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) + neg = true + default: + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + } + // don't do this check, because callers may only want the unsigned value. 
+ // if ui > math.MaxInt64 { + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // } + return +} + +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + _, i, _ := d.decIntAny() + f = float64(i) + } else { + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + } + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: + b = true + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) readMapLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) readArrayLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(d.r.readUint16()) + case 3: + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) + return int(ui) + case 4: + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) + return int(ui) + } + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.bd { + case simpleVdNil: + vt = valueTypeNil + case simpleVdFalse: + vt = valueTypeBool + v = false + case simpleVdTrue: + vt = valueTypeBool + v = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + vt = valueTypeUint + ui, _, _ := d.decIntAny() + v = ui + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + vt = valueTypeInt + _, i, _ := d.decIntAny() + v = i + case simpleVdFloat32: + vt = valueTypeFloat + v = d.decodeFloat(true) + case simpleVdFloat64: + vt = valueTypeFloat + v = d.decodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + vt = valueTypeString + v = d.decodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + vt = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. 
+// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. +type SimpleHandle struct { + BasicHandle +} + +func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { + return &simpleEncDriver{w: w, h: h} +} + +func (h *SimpleHandle) newDecDriver(r decReader) decDriver { + return &simpleDecDriver{r: r, h: h} +} + +func (_ *SimpleHandle) writeExt() bool { + return true +} + +func (h *SimpleHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go new file mode 100644 index 000000000..c86d65328 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go @@ -0,0 +1,193 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
+ +package codec + +import ( + "time" +) + +var ( + timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} +) + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). 
Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. 
+func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
+ // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go new file mode 100644 index 000000000..2e9b3a0f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go @@ -0,0 +1,103 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies related to testing live in this file, +// so porting to different environment is easy (just update functions). +// +// Also, this file is called z_helper_test, to give a "hint" to compiler +// that its init() function should be called last. 
(not guaranteed by spec) + +import ( + "errors" + "reflect" + "flag" + "testing" +) + +var ( + testLogToT = true + failNowOnFail = true +) + +func init() { + testInitFlags() + benchInitFlags() + flag.Parse() + testInit() + benchInit() +} + +func checkErrT(t *testing.T, err error) { + if err != nil { + logT(t, err.Error()) + failT(t) + } +} + +func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { + if err = deepEqual(v1, v2); err != nil { + logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) + failT(t) + } + return +} + +func logT(x interface{}, format string, args ...interface{}) { + if t, ok := x.(*testing.T); ok && t != nil && testLogToT { + t.Logf(format, args...) + } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { + b.Logf(format, args...) + } else { + debugf(format, args...) + } +} + +func failT(t *testing.T) { + if failNowOnFail { + t.FailNow() + } else { + t.Fail() + } +} + +func deepEqual(v1, v2 interface{}) (err error) { + if !reflect.DeepEqual(v1, v2) { + err = errors.New("Not Match") + } + return +} + +func approxDataSize(rv reflect.Value) (sum int) { + switch rk := rv.Kind(); rk { + case reflect.Invalid: + case reflect.Ptr, reflect.Interface: + sum += int(rv.Type().Size()) + sum += approxDataSize(rv.Elem()) + case reflect.Slice: + sum += int(rv.Type().Size()) + for j := 0; j < rv.Len(); j++ { + sum += approxDataSize(rv.Index(j)) + } + case reflect.String: + sum += int(rv.Type().Size()) + sum += rv.Len() + case reflect.Map: + sum += int(rv.Type().Size()) + for _, mk := range rv.MapKeys() { + sum += approxDataSize(mk) + sum += approxDataSize(rv.MapIndex(mk)) + } + case reflect.Struct: + //struct size already includes the full data size. 
+ //sum += int(rv.Type().Size()) + for j := 0; j < rv.NumField(); j++ { + sum += approxDataSize(rv.Field(j)) + } + default: + //pure value types + sum += int(rv.Type().Size()) + } + return +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE new file mode 100644 index 000000000..f0e5c79e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. 
Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. 
Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md new file mode 100644 index 000000000..5d7180ab9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md @@ -0,0 +1,11 @@ +raft-boltdb +=========== + +This repository provides the `raftboltdb` package. The package exports the +`BoltStore` which is an implementation of both a `LogStore` and `StableStore`. + +It is meant to be used as a backend for the `raft` [package +here](https://github.com/hashicorp/raft). + +This implementation uses [BoltDB](https://github.com/boltdb/bolt). BoltDB is +a simple key/value store implemented in pure Go, and inspired by LMDB. 
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go new file mode 100644 index 000000000..b860706fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go @@ -0,0 +1,88 @@ +package raftboltdb + +import ( + "os" + "testing" + + "github.com/hashicorp/raft/bench" +) + +func BenchmarkBoltStore_FirstIndex(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.FirstIndex(b, store) +} + +func BenchmarkBoltStore_LastIndex(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.LastIndex(b, store) +} + +func BenchmarkBoltStore_GetLog(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.GetLog(b, store) +} + +func BenchmarkBoltStore_StoreLog(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.StoreLog(b, store) +} + +func BenchmarkBoltStore_StoreLogs(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.StoreLogs(b, store) +} + +func BenchmarkBoltStore_DeleteRange(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.DeleteRange(b, store) +} + +func BenchmarkBoltStore_Set(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.Set(b, store) +} + +func BenchmarkBoltStore_Get(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.Get(b, store) +} + +func BenchmarkBoltStore_SetUint64(b *testing.B) { + store := testBoltStore(b) + defer store.Close() + defer os.Remove(store.path) + + raftbench.SetUint64(b, store) +} + +func BenchmarkBoltStore_GetUint64(b *testing.B) { + store := testBoltStore(b) + defer 
store.Close() + defer os.Remove(store.path) + + raftbench.GetUint64(b, store) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go new file mode 100644 index 000000000..ab6dd4803 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go @@ -0,0 +1,231 @@ +package raftboltdb + +import ( + "errors" + + "github.com/boltdb/bolt" + "github.com/hashicorp/raft" +) + +const ( + // Permissions to use on the db file. This is only used if the + // database file does not exist and needs to be created. + dbFileMode = 0600 +) + +var ( + // Bucket names we perform transactions in + dbLogs = []byte("logs") + dbConf = []byte("conf") + + // An error indicating a given key does not exist + ErrKeyNotFound = errors.New("not found") +) + +// BoltStore provides access to BoltDB for Raft to store and retrieve +// log entries. It also provides key/value storage, and can be used as +// a LogStore and StableStore. +type BoltStore struct { + // conn is the underlying handle to the db. + conn *bolt.DB + + // The path to the Bolt database file + path string +} + +// NewBoltStore takes a file path and returns a connected Raft backend. +func NewBoltStore(path string) (*BoltStore, error) { + // Try to connect + handle, err := bolt.Open(path, dbFileMode, nil) + if err != nil { + return nil, err + } + + // Create the new store + store := &BoltStore{ + conn: handle, + path: path, + } + + // Set up our buckets + if err := store.initialize(); err != nil { + store.Close() + return nil, err + } + + return store, nil +} + +// initialize is used to set up all of the buckets. 
+func (b *BoltStore) initialize() error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Create all the buckets + if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { + return err + } + if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { + return err + } + + return tx.Commit() +} + +// Close is used to gracefully close the DB connection. +func (b *BoltStore) Close() error { + return b.conn.Close() +} + +// FirstIndex returns the first known index from the Raft log. +func (b *BoltStore) FirstIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if first, _ := curs.First(); first == nil { + return 0, nil + } else { + return bytesToUint64(first), nil + } +} + +// LastIndex returns the last known index from the Raft log. +func (b *BoltStore) LastIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if last, _ := curs.Last(); last == nil { + return 0, nil + } else { + return bytesToUint64(last), nil + } +} + +// GetLog is used to retrieve a log from BoltDB at a given index. 
+func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error { + tx, err := b.conn.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbLogs) + val := bucket.Get(uint64ToBytes(idx)) + + if val == nil { + return raft.ErrLogNotFound + } + return decodeMsgPack(val, log) +} + +// StoreLog is used to store a single raft log +func (b *BoltStore) StoreLog(log *raft.Log) error { + return b.StoreLogs([]*raft.Log{log}) +} + +// StoreLogs is used to store a set of raft logs +func (b *BoltStore) StoreLogs(logs []*raft.Log) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + for _, log := range logs { + key := uint64ToBytes(log.Index) + val, err := encodeMsgPack(log) + if err != nil { + return err + } + bucket := tx.Bucket(dbLogs) + if err := bucket.Put(key, val.Bytes()); err != nil { + return err + } + } + + return tx.Commit() +} + +// DeleteRange is used to delete logs within a given range inclusively. +func (b *BoltStore) DeleteRange(min, max uint64) error { + minKey := uint64ToBytes(min) + + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { + // Handle out-of-range log index + if bytesToUint64(k) > max { + break + } + + // Delete in-range log index + if err := curs.Delete(); err != nil { + return err + } + } + + return tx.Commit() +} + +// Set is used to set a key/value set outside of the raft log +func (b *BoltStore) Set(k, v []byte) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + if err := bucket.Put(k, v); err != nil { + return err + } + + return tx.Commit() +} + +// Get is used to retrieve a value from the k/v store by key +func (b *BoltStore) Get(k []byte) ([]byte, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return nil, err + } + defer 
tx.Rollback() + + bucket := tx.Bucket(dbConf) + val := bucket.Get(k) + + if val == nil { + return nil, ErrKeyNotFound + } + return append([]byte{}, val...), nil +} + +// SetUint64 is like Set, but handles uint64 values +func (b *BoltStore) SetUint64(key []byte, val uint64) error { + return b.Set(key, uint64ToBytes(val)) +} + +// GetUint64 is like Get, but handles uint64 values +func (b *BoltStore) GetUint64(key []byte) (uint64, error) { + val, err := b.Get(key) + if err != nil { + return 0, err + } + return bytesToUint64(val), nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go new file mode 100644 index 000000000..ab2a1525c --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go @@ -0,0 +1,332 @@ +package raftboltdb + +import ( + "bytes" + "io/ioutil" + "os" + "reflect" + "testing" + + "github.com/boltdb/bolt" + "github.com/hashicorp/raft" +) + +func testBoltStore(t testing.TB) *BoltStore { + fh, err := ioutil.TempFile("", "bolt") + if err != nil { + t.Fatalf("err: %s", err) + } + os.Remove(fh.Name()) + + // Successfully creates and returns a store + store, err := NewBoltStore(fh.Name()) + if err != nil { + t.Fatalf("err: %s", err) + } + + return store +} + +func testRaftLog(idx uint64, data string) *raft.Log { + return &raft.Log{ + Data: []byte(data), + Index: idx, + } +} + +func TestBoltStore_Implements(t *testing.T) { + var store interface{} = &BoltStore{} + if _, ok := store.(raft.StableStore); !ok { + t.Fatalf("BoltStore does not implement raft.StableStore") + } + if _, ok := store.(raft.LogStore); !ok { + t.Fatalf("BoltStore does not implement raft.LogStore") + } +} + +func TestNewBoltStore(t *testing.T) { + fh, err := ioutil.TempFile("", "bolt") + if err != nil { + t.Fatalf("err: %s", err) + } + os.Remove(fh.Name()) + defer os.Remove(fh.Name()) + + // Successfully creates and returns a store + store, err 
:= NewBoltStore(fh.Name()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the file was created + if store.path != fh.Name() { + t.Fatalf("unexpected file path %q", store.path) + } + if _, err := os.Stat(fh.Name()); err != nil { + t.Fatalf("err: %s", err) + } + + // Close the store so we can open again + if err := store.Close(); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure our tables were created + db, err := bolt.Open(fh.Name(), dbFileMode, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + tx, err := db.Begin(true) + if err != nil { + t.Fatalf("err: %s", err) + } + if _, err := tx.CreateBucket([]byte(dbLogs)); err != bolt.ErrBucketExists { + t.Fatalf("bad: %v", err) + } + if _, err := tx.CreateBucket([]byte(dbConf)); err != bolt.ErrBucketExists { + t.Fatalf("bad: %v", err) + } +} + +func TestBoltStore_FirstIndex(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Should get 0 index on empty log + idx, err := store.FirstIndex() + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 0 { + t.Fatalf("bad: %v", idx) + } + + // Set a mock raft log + logs := []*raft.Log{ + testRaftLog(1, "log1"), + testRaftLog(2, "log2"), + testRaftLog(3, "log3"), + } + if err := store.StoreLogs(logs); err != nil { + t.Fatalf("bad: %s", err) + } + + // Fetch the first Raft index + idx, err = store.FirstIndex() + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 1 { + t.Fatalf("bad: %d", idx) + } +} + +func TestBoltStore_LastIndex(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Should get 0 index on empty log + idx, err := store.LastIndex() + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 0 { + t.Fatalf("bad: %v", idx) + } + + // Set a mock raft log + logs := []*raft.Log{ + testRaftLog(1, "log1"), + testRaftLog(2, "log2"), + testRaftLog(3, "log3"), + } + if err := store.StoreLogs(logs); err != nil { + t.Fatalf("bad: %s", 
err) + } + + // Fetch the last Raft index + idx, err = store.LastIndex() + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 3 { + t.Fatalf("bad: %d", idx) + } +} + +func TestBoltStore_GetLog(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + log := new(raft.Log) + + // Should return an error on non-existent log + if err := store.GetLog(1, log); err != raft.ErrLogNotFound { + t.Fatalf("expected raft log not found error, got: %v", err) + } + + // Set a mock raft log + logs := []*raft.Log{ + testRaftLog(1, "log1"), + testRaftLog(2, "log2"), + testRaftLog(3, "log3"), + } + if err := store.StoreLogs(logs); err != nil { + t.Fatalf("bad: %s", err) + } + + // Should return the proper log + if err := store.GetLog(2, log); err != nil { + t.Fatalf("err: %s", err) + } + if !reflect.DeepEqual(log, logs[1]) { + t.Fatalf("bad: %#v", log) + } +} + +func TestBoltStore_SetLog(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Create the log + log := &raft.Log{ + Data: []byte("log1"), + Index: 1, + } + + // Attempt to store the log + if err := store.StoreLog(log); err != nil { + t.Fatalf("err: %s", err) + } + + // Retrieve the log again + result := new(raft.Log) + if err := store.GetLog(1, result); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the log comes back the same + if !reflect.DeepEqual(log, result) { + t.Fatalf("bad: %v", result) + } +} + +func TestBoltStore_SetLogs(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Create a set of logs + logs := []*raft.Log{ + testRaftLog(1, "log1"), + testRaftLog(2, "log2"), + } + + // Attempt to store the logs + if err := store.StoreLogs(logs); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure we stored them all + result1, result2 := new(raft.Log), new(raft.Log) + if err := store.GetLog(1, result1); err != nil { + t.Fatalf("err: %s", err) + } + if 
!reflect.DeepEqual(logs[0], result1) { + t.Fatalf("bad: %#v", result1) + } + if err := store.GetLog(2, result2); err != nil { + t.Fatalf("err: %s", err) + } + if !reflect.DeepEqual(logs[1], result2) { + t.Fatalf("bad: %#v", result2) + } +} + +func TestBoltStore_DeleteRange(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Create a set of logs + log1 := testRaftLog(1, "log1") + log2 := testRaftLog(2, "log2") + log3 := testRaftLog(3, "log3") + logs := []*raft.Log{log1, log2, log3} + + // Attempt to store the logs + if err := store.StoreLogs(logs); err != nil { + t.Fatalf("err: %s", err) + } + + // Attempt to delete a range of logs + if err := store.DeleteRange(1, 2); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the logs were deleted + if err := store.GetLog(1, new(raft.Log)); err != raft.ErrLogNotFound { + t.Fatalf("should have deleted log1") + } + if err := store.GetLog(2, new(raft.Log)); err != raft.ErrLogNotFound { + t.Fatalf("should have deleted log2") + } +} + +func TestBoltStore_Set_Get(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Returns error on non-existent key + if _, err := store.Get([]byte("bad")); err != ErrKeyNotFound { + t.Fatalf("expected not found error, got: %q", err) + } + + k, v := []byte("hello"), []byte("world") + + // Try to set a k/v pair + if err := store.Set(k, v); err != nil { + t.Fatalf("err: %s", err) + } + + // Try to read it back + val, err := store.Get(k) + if err != nil { + t.Fatalf("err: %s", err) + } + if !bytes.Equal(val, v) { + t.Fatalf("bad: %v", val) + } +} + +func TestBoltStore_SetUint64_GetUint64(t *testing.T) { + store := testBoltStore(t) + defer store.Close() + defer os.Remove(store.path) + + // Returns error on non-existent key + if _, err := store.GetUint64([]byte("bad")); err != ErrKeyNotFound { + t.Fatalf("expected not found error, got: %q", err) + } + + k, v := []byte("abc"), uint64(123) + + // 
Attempt to set the k/v pair + if err := store.SetUint64(k, v); err != nil { + t.Fatalf("err: %s", err) + } + + // Read back the value + val, err := store.GetUint64(k) + if err != nil { + t.Fatalf("err: %s", err) + } + if val != v { + t.Fatalf("bad: %v", val) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go new file mode 100644 index 000000000..68dd786b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go @@ -0,0 +1,37 @@ +package raftboltdb + +import ( + "bytes" + "encoding/binary" + + "github.com/hashicorp/go-msgpack/codec" +) + +// Decode reverses the encode operation on a byte slice input +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Converts a uint to a byte slice +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore b/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff 
--git a/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml b/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml new file mode 100644 index 000000000..5cf041d26 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.2 + - tip + +install: make deps +script: + - make integ + +notifications: + flowdock: + secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= + diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor†+ + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version†+ + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution†+ + means Covered Software of a particular Contributor. + +1.4. “Covered Software†+ + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses†+ means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form†+ + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work†+ + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License†+ + means this document. + +1.9. “Licensable†+ + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications†+ + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims†of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License†+ + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form†+ + means the form of the work preferred for making modifications. + +1.14. “You†(or “Yourâ€) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You†includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control†means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is†basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses†Notice + + This Source Code Form is “Incompatible + With Secondary Licensesâ€, as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile b/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile new file mode 100644 index 000000000..c61b34a8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile @@ -0,0 +1,17 @@ +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) + +test: + go test -timeout=5s ./... + +integ: test + INTEG_TESTS=yes go test -timeout=3s -run=Integ ./... + +deps: + go get -d -v ./... + echo $(DEPS) | xargs -n1 go get -d + +cov: + INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html + open /tmp/coverage.html + +.PHONY: test cov integ deps diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft/README.md new file mode 100644 index 000000000..ecb6c977e --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/README.md @@ -0,0 +1,89 @@ +raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) +==== + +raft is a [Go](http://www.golang.org) library that manages a replicated +log and can be used with an FSM to manage replicated state machines. It +is library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). + +The use cases for such a library are far-reaching as replicated state +machines are a key component of many distributed systems. They enable +building Consistent, Partition Tolerant (CP) systems, with limited +fault tolerance as well. + +## Building + +If you wish to build raft you'll need Go version 1.2+ installed. + +Please check your installation with: + +``` +go version +``` + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). + +To prevent complications with cgo, the primary backend `MDBStore` is in a separate repositoy, +called [raft-mdb](http://github.com/hashicorp/raft-mdb). 
That is the recommended implementation +for the `LogStore` and `StableStore`. + +A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called +[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` +and `StableStore`. + +## Protocol + +raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) + +A high level overview of the Raft protocol is described below, but for details please read the full +[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) +followed by the raft source. Any questions about the raft protocol should be sent to the +[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). + +### Protocol Description + +Raft nodes are always in one of three states: follower, candidate or leader. All +nodes initially start out as a follower. In this state, nodes can accept log entries +from a leader and cast votes. If no entries are received for some time, nodes +self-promote to the candidate state. In the candidate state nodes request votes from +their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. +The leader must accept new log entries and replicate to all the other followers. +In addition, if stale reads are not acceptable, all queries must also be performed on +the leader. + +Once a cluster has a leader, it is able to accept new log entries. A client can +request that a leader append a new log entry, which is an opaque binary blob to +Raft. The leader then writes the entry to durable storage and attempts to replicate +to a quorum of followers. Once the log entry is considered *committed*, it can be +*applied* to a finite state machine. The finite state machine is application specific, +and is implemented using an interface. + +An obvious question relates to the unbounded nature of a replicated log. 
Raft provides +a mechanism by which the current state is snapshotted, and the log is compacted. Because +of the FSM abstraction, restoring the state of the FSM must result in the same state +as a replay of old logs. This allows Raft to capture the FSM state at a point in time, +and then remove all the logs that were used to reach that state. This is performed automatically +without user intervention, and prevents unbounded disk usage as well as minimizing +time spent replaying logs. + +Lastly, there is the issue of updating the peer set when new servers are joining +or existing servers are leaving. As long as a quorum of nodes is available, this +is not an issue as Raft provides mechanisms to dynamically update the peer set. +If a quorum of nodes is unavailable, then this becomes a very challenging issue. +For example, suppose there are only 2 peers, A and B. The quorum size is also +2, meaning both nodes must agree to commit a log entry. If either A or B fails, +it is now impossible to reach quorum. This means the cluster is unable to add, +or remove a node, or commit any additional log entries. This results in *unavailability*. +At this point, manual intervention would be required to remove either A or B, +and to restart the remaining node in bootstrap mode. + +A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster +of 5 can tolerate 2 node failures. The recommended configuration is to either +run 3 or 5 raft servers. This maximizes availability without +greatly sacrificing performance. + +In terms of performance, Raft is comparable to Paxos. Assuming stable leadership, +committing a log entry requires a single round trip to half of the cluster. +Thus performance is bound by disk I/O and network latency. 
+ diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go b/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go new file mode 100644 index 000000000..d7a58f45f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go @@ -0,0 +1,171 @@ +package raftbench + +// raftbench provides common benchmarking functions which can be used by +// anything which implements the raft.LogStore and raft.StableStore interfaces. +// All functions accept these interfaces and perform benchmarking. This +// makes comparing backend performance easier by sharing the tests. + +import ( + "github.com/hashicorp/raft" + "testing" +) + +func FirstIndex(b *testing.B, store raft.LogStore) { + // Create some fake data + var logs []*raft.Log + for i := 1; i < 10; i++ { + logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) + } + if err := store.StoreLogs(logs); err != nil { + b.Fatalf("err: %s", err) + } + b.ResetTimer() + + // Run FirstIndex a number of times + for n := 0; n < b.N; n++ { + store.FirstIndex() + } +} + +func LastIndex(b *testing.B, store raft.LogStore) { + // Create some fake data + var logs []*raft.Log + for i := 1; i < 10; i++ { + logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) + } + if err := store.StoreLogs(logs); err != nil { + b.Fatalf("err: %s", err) + } + b.ResetTimer() + + // Run LastIndex a number of times + for n := 0; n < b.N; n++ { + store.LastIndex() + } +} + +func GetLog(b *testing.B, store raft.LogStore) { + // Create some fake data + var logs []*raft.Log + for i := 1; i < 10; i++ { + logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) + } + if err := store.StoreLogs(logs); err != nil { + b.Fatalf("err: %s", err) + } + b.ResetTimer() + + // Run GetLog a number of times + for n := 0; n < b.N; n++ { + if err := store.GetLog(5, new(raft.Log)); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func StoreLog(b *testing.B, store raft.LogStore) { + // Run 
StoreLog a number of times + for n := 0; n < b.N; n++ { + log := &raft.Log{Index: uint64(n), Data: []byte("data")} + if err := store.StoreLog(log); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func StoreLogs(b *testing.B, store raft.LogStore) { + // Run StoreLogs a number of times. We want to set multiple logs each + // run, so we create 3 logs with incrementing indexes for each iteration. + for n := 0; n < b.N; n++ { + b.StopTimer() + offset := 3 * (n + 1) + logs := []*raft.Log{ + &raft.Log{Index: uint64(offset - 2), Data: []byte("data")}, + &raft.Log{Index: uint64(offset - 1), Data: []byte("data")}, + &raft.Log{Index: uint64(offset), Data: []byte("data")}, + } + b.StartTimer() + + if err := store.StoreLogs(logs); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func DeleteRange(b *testing.B, store raft.LogStore) { + // Create some fake data. In this case, we create 3 new log entries for each + // test case, and separate them by index in multiples of 10. This allows + // some room so that we can test deleting ranges with "extra" logs to + // to ensure we stop going to the database once our max index is hit. 
+ var logs []*raft.Log + for n := 0; n < b.N; n++ { + offset := 10 * n + for i := offset; i < offset+3; i++ { + logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) + } + } + if err := store.StoreLogs(logs); err != nil { + b.Fatalf("err: %s", err) + } + b.ResetTimer() + + // Delete a range of the data + for n := 0; n < b.N; n++ { + offset := 10 * n + if err := store.DeleteRange(uint64(offset), uint64(offset+9)); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func Set(b *testing.B, store raft.StableStore) { + // Run Set a number of times + for n := 0; n < b.N; n++ { + if err := store.Set([]byte{byte(n)}, []byte("val")); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func Get(b *testing.B, store raft.StableStore) { + // Create some fake data + for i := 1; i < 10; i++ { + if err := store.Set([]byte{byte(i)}, []byte("val")); err != nil { + b.Fatalf("err: %s", err) + } + } + b.ResetTimer() + + // Run Get a number of times + for n := 0; n < b.N; n++ { + if _, err := store.Get([]byte{0x05}); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func SetUint64(b *testing.B, store raft.StableStore) { + // Run SetUint64 a number of times + for n := 0; n < b.N; n++ { + if err := store.SetUint64([]byte{byte(n)}, uint64(n)); err != nil { + b.Fatalf("err: %s", err) + } + } +} + +func GetUint64(b *testing.B, store raft.StableStore) { + // Create some fake data + for i := 0; i < 10; i++ { + if err := store.SetUint64([]byte{byte(i)}, uint64(i)); err != nil { + b.Fatalf("err: %s", err) + } + } + b.ResetTimer() + + // Run GetUint64 a number of times + for n := 0; n < b.N; n++ { + if _, err := store.Get([]byte{0x05}); err != nil { + b.Fatalf("err: %s", err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go b/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go new file mode 100644 index 000000000..fd0194841 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go @@ -0,0 +1,80 @@ +package 
raft + +// AppendEntriesRequest is the command used to append entries to the +// replicated log. +type AppendEntriesRequest struct { + // Provide the current term and leader + Term uint64 + Leader []byte + + // Provide the previous entries for integrity checking + PrevLogEntry uint64 + PrevLogTerm uint64 + + // New entries to commit + Entries []*Log + + // Commit index on the leader + LeaderCommitIndex uint64 +} + +// AppendEntriesResponse is the response returned from an +// AppendEntriesRequest. +type AppendEntriesResponse struct { + // Newer term if leader is out of date + Term uint64 + + // Last Log is a hint to help accelerate rebuilding slow nodes + LastLog uint64 + + // We may not succeed if we have a conflicting entry + Success bool +} + +// RequestVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. +type RequestVoteRequest struct { + // Provide the term and our id + Term uint64 + Candidate []byte + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 +} + +// RequestVoteResponse is the response returned from a RequestVoteRequest. +type RequestVoteResponse struct { + // Newer term if leader is out of date + Term uint64 + + // Return the peers, so that a node can shutdown on removal + Peers []byte + + // Is the vote granted + Granted bool +} + +// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its +// log (and state machine) from a snapshot on another peer. +type InstallSnapshotRequest struct { + Term uint64 + Leader []byte + + // These are the last index/term included in the snapshot + LastLogIndex uint64 + LastLogTerm uint64 + + // Peer Set in the snapshot + Peers []byte + + // Size of the snapshot + Size int64 +} + +// InstallSnapshotResponse is the response returned from an +// InstallSnapshotRequest. 
+type InstallSnapshotResponse struct { + Term uint64 + Success bool +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/config.go b/Godeps/_workspace/src/github.com/hashicorp/raft/config.go new file mode 100644 index 000000000..047a88abe --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/config.go @@ -0,0 +1,125 @@ +package raft + +import ( + "fmt" + "io" + "log" + "time" +) + +// Config provides any necessary configuration to +// the Raft server +type Config struct { + // Time in follower state without a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // Time in candidate state without a leader before we attempt an election. + ElectionTimeout time.Duration + + // Time without an Apply() operation before we heartbeat to ensure + // a timely commit. Due to random staggering, may be delayed as much as + // 2x this value. + CommitTimeout time.Duration + + // MaxAppendEntries controls the maximum number of append entries + // to send at once. We want to strike a balance between efficiency + // and avoiding waste if the follower is going to reject because of + // an inconsistent log. + MaxAppendEntries int + + // If we are a member of a cluster, and RemovePeer is invoked for the + // local node, then we forget all peers and transition into the follower state. + // If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise, + // we can become a leader of a cluster containing only this node. + ShutdownOnRemove bool + + // DisableBootstrapAfterElect is used to turn off EnableSingleNode + // after the node is elected. This is used to prevent self-election + // if the node is removed from the Raft cluster via RemovePeer. Setting + // it to false will keep the bootstrap mode, allowing the node to self-elect + // and potentially bootstrap a separate cluster. + DisableBootstrapAfterElect bool + + // TrailingLogs controls how many logs we leave after a snapshot. 
This is + // used so that we can quickly replay logs on a follower instead of being + // forced to send an entire snapshot. + TrailingLogs uint64 + + // SnapshotInterval controls how often we check if we should perform a snapshot. + // We randomly stagger between this value and 2x this value to avoid the entire + // cluster from performing a snapshot at once. + SnapshotInterval time.Duration + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshots when we can + // just replay a small set of logs. + SnapshotThreshold uint64 + + // EnableSingleNode allows for a single node mode of operation. This + // is false by default, which prevents a lone node from electing itself. + // leader. + EnableSingleNode bool + + // LeaderLeaseTimeout is used to control how long the "lease" lasts + // for being the leader without being able to contact a quorum + // of nodes. If we reach this interval without contact, we will + // step down as leader. + LeaderLeaseTimeout time.Duration + + // LogOutput is used as a sink for logs, unless Logger is specified. + // Defaults to os.Stderr. + LogOutput io.Writer + + // Logger is a user-provided logger. If nil, a logger writing to LogOutput + // is used. + Logger *log.Logger +} + +// DefaultConfig returns a Config with usable defaults. 
+func DefaultConfig() *Config { + return &Config{ + HeartbeatTimeout: 1000 * time.Millisecond, + ElectionTimeout: 1000 * time.Millisecond, + CommitTimeout: 50 * time.Millisecond, + MaxAppendEntries: 64, + ShutdownOnRemove: true, + DisableBootstrapAfterElect: true, + TrailingLogs: 10240, + SnapshotInterval: 120 * time.Second, + SnapshotThreshold: 8192, + EnableSingleNode: false, + LeaderLeaseTimeout: 500 * time.Millisecond, + } +} + +// ValidateConfig is used to validate a sane configuration +func ValidateConfig(config *Config) error { + if config.HeartbeatTimeout < 5*time.Millisecond { + return fmt.Errorf("Heartbeat timeout is too low") + } + if config.ElectionTimeout < 5*time.Millisecond { + return fmt.Errorf("Election timeout is too low") + } + if config.CommitTimeout < time.Millisecond { + return fmt.Errorf("Commit timeout is too low") + } + if config.MaxAppendEntries <= 0 { + return fmt.Errorf("MaxAppendEntries must be positive") + } + if config.MaxAppendEntries > 1024 { + return fmt.Errorf("MaxAppendEntries is too large") + } + if config.SnapshotInterval < 5*time.Millisecond { + return fmt.Errorf("Snapshot interval is too low") + } + if config.LeaderLeaseTimeout < 5*time.Millisecond { + return fmt.Errorf("Leader lease timeout is too low") + } + if config.LeaderLeaseTimeout > config.HeartbeatTimeout { + return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") + } + if config.ElectionTimeout < config.HeartbeatTimeout { + return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go new file mode 100644 index 000000000..1b4611d55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go @@ -0,0 +1,48 @@ +package raft + +import ( + "fmt" + "io" +) + +// DiscardSnapshotStore is used to successfully snapshot while +// 
always discarding the snapshot. This is useful for when the +// log should be truncated but no snapshot should be retained. +// This should never be used for production use, and is only +// suitable for testing. +type DiscardSnapshotStore struct{} + +type DiscardSnapshotSink struct{} + +// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. +func NewDiscardSnapshotStore() *DiscardSnapshotStore { + return &DiscardSnapshotStore{} +} + +func (d *DiscardSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { + return &DiscardSnapshotSink{}, nil +} + +func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { + return nil, nil +} + +func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + return nil, nil, fmt.Errorf("open is not supported") +} + +func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { + return len(b), nil +} + +func (d *DiscardSnapshotSink) Close() error { + return nil +} + +func (d *DiscardSnapshotSink) ID() string { + return "discard" +} + +func (d *DiscardSnapshotSink) Cancel() error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go new file mode 100644 index 000000000..5abedfe2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go @@ -0,0 +1,17 @@ +package raft + +import "testing" + +func TestDiscardSnapshotStoreImpl(t *testing.T) { + var impl interface{} = &DiscardSnapshotStore{} + if _, ok := impl.(SnapshotStore); !ok { + t.Fatalf("DiscardSnapshotStore not a SnapshotStore") + } +} + +func TestDiscardSnapshotSinkImpl(t *testing.T) { + var impl interface{} = &DiscardSnapshotSink{} + if _, ok := impl.(SnapshotSink); !ok { + t.Fatalf("DiscardSnapshotSink not a SnapshotSink") + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go 
b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go new file mode 100644 index 000000000..bda3d6d8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go @@ -0,0 +1,460 @@ +package raft + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +const ( + testPath = "permTest" + snapPath = "snapshots" + metaFilePath = "meta.json" + stateFilePath = "state.bin" + tmpSuffix = ".tmp" +) + +// FileSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. +type FileSnapshotStore struct { + path string + retain int + logger *log.Logger +} + +type snapMetaSlice []*fileSnapshotMeta + +// FileSnapshotSink implements SnapshotSink with a file. +type FileSnapshotSink struct { + store *FileSnapshotStore + logger *log.Logger + dir string + meta fileSnapshotMeta + + stateFile *os.File + stateHash hash.Hash64 + buffered *bufio.Writer + + closed bool +} + +// fileSnapshotMeta is stored on disk. We also put a CRC +// on disk so that we can verify the snapshot. +type fileSnapshotMeta struct { + SnapshotMeta + CRC []byte +} + +// bufferedFile is returned when we open a snapshot. This way +// reads are buffered and the file still gets closed. +type bufferedFile struct { + bh *bufio.Reader + fh *os.File +} + +func (b *bufferedFile) Read(p []byte) (n int, err error) { + return b.bh.Read(p) +} + +func (b *bufferedFile) Close() error { + return b.fh.Close() +} + +// NewFileSnapshotStore creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. 
+func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logOutput == nil { + logOutput = os.Stderr + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &FileSnapshotStore{ + path: path, + retain: retain, + logger: log.New(logOutput, "", log.LstdFlags), + } + + // Do a permissions test + if err := store.testPermissions(); err != nil { + return nil, fmt.Errorf("permissions test failed: %v", err) + } + return store, nil +} + +// testPermissions tries to touch a file in our path to see if it works. +func (f *FileSnapshotStore) testPermissions() error { + path := filepath.Join(f.path, testPath) + fh, err := os.Create(path) + if err != nil { + return err + } + fh.Close() + os.Remove(path) + return nil +} + +// snapshotName generates a name for the snapshot. 
+func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} + +// Create is used to start a new snapshot +func (f *FileSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { + // Create a new path + name := snapshotName(term, index) + path := filepath.Join(f.path, name+tmpSuffix) + f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) + + // Make the directory + if err := os.MkdirAll(path, 0755); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) + return nil, err + } + + // Create the sink + sink := &FileSnapshotSink{ + store: f, + logger: f.logger, + dir: path, + meta: fileSnapshotMeta{ + SnapshotMeta: SnapshotMeta{ + ID: name, + Index: index, + Term: term, + Peers: peers, + }, + CRC: nil, + }, + } + + // Write out the meta data + if err := sink.writeMeta(); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return nil, err + } + + // Open the state file + statePath := filepath.Join(path, stateFilePath) + fh, err := os.Create(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) + return nil, err + } + sink.stateFile = fh + + // Create a CRC64 hash + sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Wrap both the hash and file in a MultiWriter with buffering + multi := io.MultiWriter(sink.stateFile, sink.stateHash) + sink.buffered = bufio.NewWriter(multi) + + // Done + return sink, nil +} + +// List returns available snapshots in the store. 
+func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return nil, err + } + + var snapMeta []*SnapshotMeta + for _, meta := range snapshots { + snapMeta = append(snapMeta, &meta.SnapshotMeta) + if len(snapMeta) == f.retain { + break + } + } + return snapMeta, nil +} + +// getSnapshots returns all the known snapshots. +func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := ioutil.ReadDir(f.path) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) + return nil, err + } + + // Populate the metadata + var snapMeta []*fileSnapshotMeta + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Ignore any temporary snapshots + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) + continue + } + + // Try to read the meta data + meta, err := f.readMeta(dirName) + if err != nil { + f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) + continue + } + + // Append, but only return up to the retain count + snapMeta = append(snapMeta, meta) + } + + // Sort the snapshot, reverse so we get new -> old + sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) + + return snapMeta, nil +} + +// readMeta is used to read the meta data for a given named backup +func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { + // Open the meta file + metaPath := filepath.Join(f.path, name, metaFilePath) + fh, err := os.Open(metaPath) + if err != nil { + return nil, err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewReader(fh) + + // Read in the JSON + meta := &fileSnapshotMeta{} + dec := json.NewDecoder(buffered) + if 
err := dec.Decode(meta); err != nil { + return nil, err + } + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. +func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + // Get the metadata + meta, err := f.readMeta(id) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) + return nil, nil, err + } + + // Open the state file + statePath := filepath.Join(f.path, id, stateFilePath) + fh, err := os.Open(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) + return nil, nil, err + } + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + _, err = io.Copy(stateHash, fh) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) + fh.Close() + return nil, nil, err + } + + // Verify the hash + computed := stateHash.Sum(nil) + if bytes.Compare(meta.CRC, computed) != 0 { + f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", + meta.CRC, computed) + fh.Close() + return nil, nil, fmt.Errorf("CRC mismatch") + } + + // Seek to the start + if _, err := fh.Seek(0, 0); err != nil { + f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) + fh.Close() + return nil, nil, err + } + + // Return a buffered file + buffered := &bufferedFile{ + bh: bufio.NewReader(fh), + fh: fh, + } + + return &meta.SnapshotMeta, buffered, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. 
+func (f *FileSnapshotStore) ReapSnapshots() error { + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return err + } + + for i := f.retain; i < len(snapshots); i++ { + path := filepath.Join(f.path, snapshots[i].ID) + f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) + return err + } + } + return nil +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *FileSnapshotSink) ID() string { + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *FileSnapshotSink) Write(b []byte) (int, error) { + return s.buffered.Write(b) +} + +// Close is used to indicate a successful end. +func (s *FileSnapshotSink) Close() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Write out the meta data + if err := s.writeMeta(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return err + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + if err := os.Rename(s.dir, newPath); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) + return err + } + + // Reap any old snapshots + s.store.ReapSnapshots() + return nil +} + +// Cancel is used to indicate an unsuccessful end. 
+func (s *FileSnapshotSink) Cancel() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) +} + +// finalize is used to close all of our resources. +func (s *FileSnapshotSink) finalize() error { + // Flush any remaining data + if err := s.buffered.Flush(); err != nil { + return err + } + + // Get the file size + stat, statErr := s.stateFile.Stat() + + // Close the file + if err := s.stateFile.Close(); err != nil { + return err + } + + // Set the file size, check after we close + if statErr != nil { + return statErr + } + s.meta.Size = stat.Size() + + // Set the CRC + s.meta.CRC = s.stateHash.Sum(nil) + return nil +} + +// writeMeta is used to write out the metadata we have. +func (s *FileSnapshotSink) writeMeta() error { + // Open the meta file + metaPath := filepath.Join(s.dir, metaFilePath) + fh, err := os.Create(metaPath) + if err != nil { + return err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewWriter(fh) + defer buffered.Flush() + + // Write out as JSON + enc := json.NewEncoder(buffered) + if err := enc.Encode(&s.meta); err != nil { + return err + } + return nil +} + +// Implement the sort interface for []*fileSnapshotMeta. 
+func (s snapMetaSlice) Len() int { + return len(s) +} + +func (s snapMetaSlice) Less(i, j int) bool { + if s[i].Term != s[j].Term { + return s[i].Term < s[j].Term + } + if s[i].Index != s[j].Index { + return s[i].Index < s[j].Index + } + return s[i].ID < s[j].ID +} + +func (s snapMetaSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go new file mode 100644 index 000000000..7620c1938 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go @@ -0,0 +1,343 @@ +package raft + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "runtime" + "testing" +) + +func FileSnapTest(t *testing.T) (string, *FileSnapshotStore) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + return dir, snap +} + +func TestFileSnapshotStoreImpl(t *testing.T) { + var impl interface{} = &FileSnapshotStore{} + if _, ok := impl.(SnapshotStore); !ok { + t.Fatalf("FileSnapshotStore not a SnapshotStore") + } +} + +func TestFileSnapshotSinkImpl(t *testing.T) { + var impl interface{} = &FileSnapshotSink{} + if _, ok := impl.(SnapshotSink); !ok { + t.Fatalf("FileSnapshotSink not a SnapshotSink") + } +} + +func TestFileSS_CreateSnapshotMissingParentDir(t *testing.T) { + parent, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(parent) + + dir, err := ioutil.TempDir(parent, "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + os.RemoveAll(parent) + peers := []byte("all my lovely friends") + _, err = snap.Create(10, 3, peers) + if err != nil { + t.Fatalf("should not fail when using non existing 
parent") + } + +} +func TestFileSS_CreateSnapshot(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check no snapshots + snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 0 { + t.Fatalf("did not expect any snapshots: %v", snaps) + } + + // Create a new sink + peers := []byte("all my lovely friends") + sink, err := snap.Create(10, 3, peers) + if err != nil { + t.Fatalf("err: %v", err) + } + + // The sink is not done, should not be in a list! + snaps, err = snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 0 { + t.Fatalf("did not expect any snapshots: %v", snaps) + } + + // Write to the sink + _, err = sink.Write([]byte("first\n")) + if err != nil { + t.Fatalf("err: %v", err) + } + _, err = sink.Write([]byte("second\n")) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Done! + err = sink.Close() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should have a snapshot! 
+ snaps, err = snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 1 { + t.Fatalf("expect a snapshots: %v", snaps) + } + + // Check the latest + latest := snaps[0] + if latest.Index != 10 { + t.Fatalf("bad snapshot: %v", *latest) + } + if latest.Term != 3 { + t.Fatalf("bad snapshot: %v", *latest) + } + if bytes.Compare(latest.Peers, peers) != 0 { + t.Fatalf("bad snapshot: %v", *latest) + } + if latest.Size != 13 { + t.Fatalf("bad snapshot: %v", *latest) + } + + // Read the snapshot + _, r, err := snap.Open(latest.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read out everything + var buf bytes.Buffer + if _, err := io.Copy(&buf, r); err != nil { + t.Fatalf("err: %v", err) + } + if err := r.Close(); err != nil { + t.Fatalf("err: %v", err) + } + + // Ensure a match + if bytes.Compare(buf.Bytes(), []byte("first\nsecond\n")) != 0 { + t.Fatalf("content mismatch") + } +} + +func TestFileSS_CancelSnapshot(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create a new sink + peers := []byte("all my lovely friends") + sink, err := snap.Create(10, 3, peers) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Cancel the snapshot! Should delete + err = sink.Cancel() + if err != nil { + t.Fatalf("err: %v", err) + } + + // The sink is canceled, should not be in a list! 
+ snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 0 { + t.Fatalf("did not expect any snapshots: %v", snaps) + } +} + +func TestFileSS_Retention(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + snap, err := NewFileSnapshotStore(dir, 2, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create a new sink + peers := []byte("all my lovely friends") + + // Create a few snapshots + for i := 10; i < 15; i++ { + sink, err := snap.Create(uint64(i), 3, peers) + if err != nil { + t.Fatalf("err: %v", err) + } + err = sink.Close() + if err != nil { + t.Fatalf("err: %v", err) + } + } + + // Should only have 2 listed! + snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 2 { + t.Fatalf("expect 2 snapshots: %v", snaps) + } + + // Check they are the latest + if snaps[0].Index != 14 { + t.Fatalf("bad snap: %#v", *snaps[0]) + } + if snaps[1].Index != 13 { + t.Fatalf("bad snap: %#v", *snaps[1]) + } +} + +func TestFileSS_BadPerm(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping file permission test on windows") + } + + // Create a temp dir + dir1, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(dir1) + + // Create a sub dir and remove all permissions + dir2, err := ioutil.TempDir(dir1, "badperm") + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chmod(dir2, 000); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chmod(dir2, 777) // Set perms back for delete + + // Should fail + if _, err := NewFileSnapshotStore(dir2, 3, nil); err == nil { + t.Fatalf("should fail to use dir with bad perms") + } +} + +func TestFileSS_MissingParentDir(t *testing.T) { + parent, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(parent) + + dir, err := 
ioutil.TempDir(parent, "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + os.RemoveAll(parent) + _, err = NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("should not fail when using non existing parent") + } +} + +func TestFileSS_Ordering(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create a new sink + peers := []byte("all my lovely friends") + + sink, err := snap.Create(130350, 5, peers) + if err != nil { + t.Fatalf("err: %v", err) + } + err = sink.Close() + if err != nil { + t.Fatalf("err: %v", err) + } + + sink, err = snap.Create(204917, 36, peers) + if err != nil { + t.Fatalf("err: %v", err) + } + err = sink.Close() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should only have 2 listed! + snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 2 { + t.Fatalf("expect 2 snapshots: %v", snaps) + } + + // Check they are ordered + if snaps[0].Term != 36 { + t.Fatalf("bad snap: %#v", *snaps[0]) + } + if snaps[1].Term != 5 { + t.Fatalf("bad snap: %#v", *snaps[1]) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go b/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go new file mode 100644 index 000000000..ea8ab548d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go @@ -0,0 +1,37 @@ +package raft + +import ( + "io" +) + +// FSM provides an interface that can be implemented by +// clients to make use of the replicated log. +type FSM interface { + // Apply log is invoked once a log entry is committed. + Apply(*Log) interface{} + + // Snapshot is used to support log compaction. This call should + // return an FSMSnapshot which can be used to save a point-in-time + // snapshot of the FSM. 
Apply and Snapshot are not called in multiple + // threads, but Apply will be called concurrently with Persist. This means + // the FSM should be implemented in a fashion that allows for concurrent + // updates while a snapshot is happening. + Snapshot() (FSMSnapshot, error) + + // Restore is used to restore an FSM from a snapshot. It is not called + // concurrently with any other command. The FSM must discard all previous + // state. + Restore(io.ReadCloser) error +} + +// FSMSnapshot is returned by an FSM in response to a Snapshot +// It must be safe to invoke FSMSnapshot methods with concurrent +// calls to Apply. +type FSMSnapshot interface { + // Persist should dump all necessary state to the WriteCloser 'sink', + // and call sink.Close() when finished or call sink.Cancel() on error. + Persist(sink SnapshotSink) error + + // Release is invoked when we are finished with the snapshot. + Release() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/future.go b/Godeps/_workspace/src/github.com/hashicorp/raft/future.go new file mode 100644 index 000000000..854e1ac92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/future.go @@ -0,0 +1,182 @@ +package raft + +import ( + "sync" + "time" +) + +// Future is used to represent an action that may occur in the future. +type Future interface { + Error() error +} + +// ApplyFuture is used for Apply() and can returns the FSM response. +type ApplyFuture interface { + Future + Response() interface{} + Index() uint64 +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. 
// deferError can be embedded to allow a future
// to provide an error at some later point, once a response arrives on
// its internal channel.
type deferError struct {
	err       error      // cached result, populated on first Error() call
	errCh     chan error // carries the eventual response; buffered, capacity 1
	responded bool       // guards against responding more than once
}

// init must be called before the future is handed out; it allocates the
// response channel. The buffer of 1 lets respond() complete without a
// waiting reader.
func (d *deferError) init() {
	d.errCh = make(chan error, 1)
}

// Error blocks until respond() has been called, then returns (and caches)
// the delivered error. Calling Error on a future that was never init'd is
// a programming error and panics.
func (d *deferError) Error() error {
	switch {
	case d.err != nil:
		// A non-nil result was already received; return the cached value.
		return d.err
	case d.errCh == nil:
		panic("waiting for response on nil channel")
	}
	d.err = <-d.errCh
	return d.err
}

// respond delivers the result exactly once. Calls on an uninitialized
// future, or after a previous respond, are silently ignored.
func (d *deferError) respond(err error) {
	if d.errCh == nil || d.responded {
		return
	}
	d.errCh <- err
	close(d.errCh)
	d.responded = true
}
// quorumPolicy allows individual logFutures to have different
// commitment rules while still using the inflight mechanism.
type quorumPolicy interface {
	// Commit registers one acknowledgement and reports whether the
	// commitment rule is now satisfied.
	Commit() bool

	// IsCommitted reports whether the rule is already satisfied,
	// without registering a new acknowledgement.
	IsCommitted() bool
}

// majorityQuorum is used by Apply transactions and requires
// a simple majority of nodes.
type majorityQuorum struct {
	count       int // acknowledgements seen so far
	votesNeeded int // threshold: floor(clusterSize/2) + 1
}

// newMajorityQuorum builds a policy requiring a strict majority of
// clusterSize nodes.
func newMajorityQuorum(clusterSize int) *majorityQuorum {
	return &majorityQuorum{votesNeeded: clusterSize/2 + 1}
}

// Commit records one acknowledgement and reports whether a majority
// has now been reached.
func (m *majorityQuorum) Commit() bool {
	m.count++
	return m.IsCommitted()
}

// IsCommitted reports whether a majority has been reached.
func (m *majorityQuorum) IsCommitted() bool {
	return m.count >= m.votesNeeded
}
+type inflight struct { + sync.Mutex + committed *list.List + commitCh chan struct{} + minCommit uint64 + maxCommit uint64 + operations map[uint64]*logFuture + stopCh chan struct{} +} + +// NewInflight returns an inflight struct that notifies +// the provided channel when logs are finished committing. +func newInflight(commitCh chan struct{}) *inflight { + return &inflight{ + committed: list.New(), + commitCh: commitCh, + minCommit: 0, + maxCommit: 0, + operations: make(map[uint64]*logFuture), + stopCh: make(chan struct{}), + } +} + +// Start is used to mark a logFuture as being inflight. It +// also commits the entry, as it is assumed the leader is +// starting. +func (i *inflight) Start(l *logFuture) { + i.Lock() + defer i.Unlock() + i.start(l) +} + +// StartAll is used to mark a list of logFuture's as being +// inflight. It also commits each entry as the leader is +// assumed to be starting. +func (i *inflight) StartAll(logs []*logFuture) { + i.Lock() + defer i.Unlock() + for _, l := range logs { + i.start(l) + } +} + +// start is used to mark a single entry as inflight, +// must be invoked with the lock held. +func (i *inflight) start(l *logFuture) { + idx := l.log.Index + i.operations[idx] = l + + if idx > i.maxCommit { + i.maxCommit = idx + } + if i.minCommit == 0 { + i.minCommit = idx + } + i.commit(idx) +} + +// Cancel is used to cancel all in-flight operations. +// This is done when the leader steps down, and all futures +// are sent the given error. 
+func (i *inflight) Cancel(err error) { + // Close the channel first to unblock any pending commits + close(i.stopCh) + + // Lock after close to avoid deadlock + i.Lock() + defer i.Unlock() + + // Respond to all inflight operations + for _, op := range i.operations { + op.respond(err) + } + + // Clear all the committed but not processed + for e := i.committed.Front(); e != nil; e = e.Next() { + e.Value.(*logFuture).respond(err) + } + + // Clear the map + i.operations = make(map[uint64]*logFuture) + + // Clear the list of committed + i.committed = list.New() + + // Close the commmitCh + close(i.commitCh) + + // Reset indexes + i.minCommit = 0 + i.maxCommit = 0 +} + +// Committed returns all the committed operations in order. +func (i *inflight) Committed() (l *list.List) { + i.Lock() + l, i.committed = i.committed, list.New() + i.Unlock() + return l +} + +// Commit is used by leader replication routines to indicate that +// a follower was finished committing a log to disk. +func (i *inflight) Commit(index uint64) { + i.Lock() + defer i.Unlock() + i.commit(index) +} + +// CommitRange is used to commit a range of indexes inclusively. +// It is optimized to avoid commits for indexes that are not tracked. +func (i *inflight) CommitRange(minIndex, maxIndex uint64) { + i.Lock() + defer i.Unlock() + + // Update the minimum index + minIndex = max(i.minCommit, minIndex) + + // Commit each index + for idx := minIndex; idx <= maxIndex; idx++ { + i.commit(idx) + } +} + +// commit is used to commit a single index. Must be called with the lock held. +func (i *inflight) commit(index uint64) { + op, ok := i.operations[index] + if !ok { + // Ignore if not in the map, as it may be committed already + return + } + + // Check if we've satisfied the commit + if !op.policy.Commit() { + return + } + + // Cannot commit if this is not the minimum inflight. This can happen + // if the quorum size changes, meaning a previous commit requires a larger + // quorum that this commit. 
We MUST block until the previous log is committed, + // otherwise logs will be applied out of order. + if index != i.minCommit { + return + } + +NOTIFY: + // Add the operation to the committed list + i.committed.PushBack(op) + + // Stop tracking since it is committed + delete(i.operations, index) + + // Update the indexes + if index == i.maxCommit { + i.minCommit = 0 + i.maxCommit = 0 + + } else { + i.minCommit++ + } + + // Check if the next in-flight operation is ready + if i.minCommit != 0 { + op = i.operations[i.minCommit] + if op.policy.IsCommitted() { + index = i.minCommit + goto NOTIFY + } + } + + // Async notify of ready operations + asyncNotifyCh(i.commitCh) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go new file mode 100644 index 000000000..a9f57d6ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go @@ -0,0 +1,150 @@ +package raft + +import ( + "fmt" + "testing" +) + +func TestInflight_StartCommit(t *testing.T) { + commitCh := make(chan struct{}, 1) + in := newInflight(commitCh) + + // Commit a transaction as being in flight + l := &logFuture{log: Log{Index: 1}} + l.policy = newMajorityQuorum(5) + in.Start(l) + + // Commit 3 times + in.Commit(1) + if in.Committed().Len() != 0 { + t.Fatalf("should not be commited") + } + + in.Commit(1) + if in.Committed().Len() != 1 { + t.Fatalf("should be commited") + } + + // Already committed but should work anyways + in.Commit(1) +} + +func TestInflight_Cancel(t *testing.T) { + commitCh := make(chan struct{}, 1) + in := newInflight(commitCh) + + // Commit a transaction as being in flight + l := &logFuture{ + log: Log{Index: 1}, + } + l.init() + l.policy = newMajorityQuorum(3) + in.Start(l) + + // Cancel with an error + err := fmt.Errorf("error 1") + in.Cancel(err) + + // Should get an error return + if l.Error() != err { + t.Fatalf("expected error") + } +} + +func 
TestInflight_StartAll(t *testing.T) { + commitCh := make(chan struct{}, 1) + in := newInflight(commitCh) + + // Commit a few transaction as being in flight + l1 := &logFuture{log: Log{Index: 2}} + l1.policy = newMajorityQuorum(5) + l2 := &logFuture{log: Log{Index: 3}} + l2.policy = newMajorityQuorum(5) + l3 := &logFuture{log: Log{Index: 4}} + l3.policy = newMajorityQuorum(5) + + // Start all the entries + in.StartAll([]*logFuture{l1, l2, l3}) + + // Commit ranges + in.CommitRange(1, 5) + in.CommitRange(1, 4) + in.CommitRange(1, 10) + + // Should get 3 back + if in.Committed().Len() != 3 { + t.Fatalf("expected all 3 to commit") + } +} + +func TestInflight_CommitRange(t *testing.T) { + commitCh := make(chan struct{}, 1) + in := newInflight(commitCh) + + // Commit a few transaction as being in flight + l1 := &logFuture{log: Log{Index: 2}} + l1.policy = newMajorityQuorum(5) + in.Start(l1) + + l2 := &logFuture{log: Log{Index: 3}} + l2.policy = newMajorityQuorum(5) + in.Start(l2) + + l3 := &logFuture{log: Log{Index: 4}} + l3.policy = newMajorityQuorum(5) + in.Start(l3) + + // Commit ranges + in.CommitRange(1, 5) + in.CommitRange(1, 4) + in.CommitRange(1, 10) + + // Should get 3 back + if in.Committed().Len() != 3 { + t.Fatalf("expected all 3 to commit") + } +} + +// Should panic if we commit non contiguously! +func TestInflight_NonContiguous(t *testing.T) { + commitCh := make(chan struct{}, 1) + in := newInflight(commitCh) + + // Commit a few transaction as being in flight + l1 := &logFuture{log: Log{Index: 2}} + l1.policy = newMajorityQuorum(5) + in.Start(l1) + + l2 := &logFuture{log: Log{Index: 3}} + l2.policy = newMajorityQuorum(5) + in.Start(l2) + + in.Commit(3) + in.Commit(3) + in.Commit(3) // panic! + + if in.Committed().Len() != 0 { + t.Fatalf("should not commit") + } + + in.Commit(2) + in.Commit(2) + in.Commit(2) // panic! 
+ + committed := in.Committed() + if committed.Len() != 2 { + t.Fatalf("should commit both") + } + + current := committed.Front() + l := current.Value.(*logFuture) + if l.log.Index != 2 { + t.Fatalf("bad: %v", *l) + } + + current = current.Next() + l = current.Value.(*logFuture) + if l.log.Index != 3 { + t.Fatalf("bad: %v", *l) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 000000000..6e4dfd020 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,116 @@ +package raft + +import ( + "sync" +) + +// InmemStore implements the LogStore and StableStore interface. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. +func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. 
+func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. +func (i *InmemStore) StoreLogs(logs []*Log) error { + i.l.Lock() + defer i.l.Unlock() + for _, l := range logs { + i.logs[l.Index] = l + if i.lowIndex == 0 { + i.lowIndex = l.Index + } + if l.Index > i.highIndex { + i.highIndex = l.Index + } + } + return nil +} + +// DeleteRange implements the LogStore interface. +func (i *InmemStore) DeleteRange(min, max uint64) error { + i.l.Lock() + defer i.l.Unlock() + for j := min; j <= max; j++ { + delete(i.logs, j) + } + i.lowIndex = max + 1 + return nil +} + +// Set implements the StableStore interface. +func (i *InmemStore) Set(key []byte, val []byte) error { + i.l.Lock() + defer i.l.Unlock() + i.kv[string(key)] = val + return nil +} + +// Get implements the StableStore interface. +func (i *InmemStore) Get(key []byte) ([]byte, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kv[string(key)], nil +} + +// SetUint64 implements the StableStore interface. +func (i *InmemStore) SetUint64(key []byte, val uint64) error { + i.l.Lock() + defer i.l.Unlock() + i.kvInt[string(key)] = val + return nil +} + +// GetUint64 implements the StableStore interface. +func (i *InmemStore) GetUint64(key []byte) (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kvInt[string(key)], nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go new file mode 100644 index 000000000..994d06d8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go @@ -0,0 +1,315 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// NewInmemAddr returns a new in-memory addr with +// a randomly generate UUID as the ID. +func NewInmemAddr() string { + return generateUUID() +} + +// inmemPipeline is used to pipeline requests for the in-mem transport. 
+type inmemPipeline struct { + trans *InmemTransport + peer *InmemTransport + peerAddr string + + doneCh chan AppendFuture + inprogressCh chan *inmemPipelineInflight + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +type inmemPipelineInflight struct { + future *appendFuture + respCh <-chan RPCResponse +} + +// InmemTransport Implements the Transport interface, to allow Raft to be +// tested in-memory without going over a network. +type InmemTransport struct { + sync.RWMutex + consumerCh chan RPC + localAddr string + peers map[string]*InmemTransport + pipelines []*inmemPipeline + timeout time.Duration +} + +// NewInmemTransport is used to initialize a new transport +// and generates a random local address. +func NewInmemTransport() (string, *InmemTransport) { + addr := NewInmemAddr() + trans := &InmemTransport{ + consumerCh: make(chan RPC, 16), + localAddr: addr, + peers: make(map[string]*InmemTransport), + timeout: 50 * time.Millisecond, + } + return addr, trans +} + +// SetHeartbeatHandler is used to set optional fast-path for +// heartbeats, not supported for this transport. +func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { +} + +// Consumer implements the Transport interface. +func (i *InmemTransport) Consumer() <-chan RPC { + return i.consumerCh +} + +// LocalAddr implements the Transport interface. +func (i *InmemTransport) LocalAddr() string { + return i.localAddr +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (i *InmemTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.Lock() + i.pipelines = append(i.pipelines, pipeline) + i.Unlock() + return pipeline, nil +} + +// AppendEntries implements the Transport interface. 
+func (i *InmemTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +} + +// InstallSnapshot implements the Transport interface. +func (i *InmemTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target string, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse) + peer.consumerCh <- RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. It uses the UUID as the +// address directly. +func (i *InmemTransport) EncodePeer(p string) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. It wraps the UUID in an +// InmemAddr. 
+func (i *InmemTransport) DecodePeer(buf []byte) string { + return string(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. +func (i *InmemTransport) Connect(peer string, trans *InmemTransport) { + i.Lock() + defer i.Unlock() + i.peers[peer] = trans +} + +// Disconnect is used to remove the ability to route to a given peer. +func (i *InmemTransport) Disconnect(peer string) { + i.Lock() + defer i.Unlock() + delete(i.peers, peer) + + // Disconnect any pipelines + n := len(i.pipelines) + for idx := 0; idx < n; idx++ { + if i.pipelines[idx].peerAddr == peer { + i.pipelines[idx].Close() + i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil + idx-- + n-- + } + } + i.pipelines = i.pipelines[:n] +} + +// DisconnectAll is used to remove all routes to peers. +func (i *InmemTransport) DisconnectAll() { + i.Lock() + defer i.Unlock() + i.peers = make(map[string]*InmemTransport) + + // Handle pipelines + for _, pipeline := range i.pipelines { + pipeline.Close() + } + i.pipelines = nil +} + +func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr string) *inmemPipeline { + i := &inmemPipeline{ + trans: trans, + peer: peer, + peerAddr: addr, + doneCh: make(chan AppendFuture, 16), + inprogressCh: make(chan *inmemPipelineInflight, 16), + shutdownCh: make(chan struct{}), + } + go i.decodeResponses() + return i +} + +func (i *inmemPipeline) decodeResponses() { + timeout := i.trans.timeout + for { + select { + case inp := <-i.inprogressCh: + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + + select { + case rpcResp := <-inp.respCh: + // Copy the result back + *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) + inp.future.respond(rpcResp.Error) + + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-timeoutCh: + inp.future.respond(fmt.Errorf("command timed out")) + select { + case i.doneCh <- 
inp.future: + case <-i.shutdownCh: + return + } + + case <-i.shutdownCh: + return + } + case <-i.shutdownCh: + return + } + } +} + +func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Handle a timeout + var timeout <-chan time.Time + if i.trans.timeout > 0 { + timeout = time.After(i.trans.timeout) + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + Command: args, + RespChan: respCh, + } + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown = true + close(i.shutdownCh) + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go new file mode 100644 index 000000000..2086a2389 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go @@ -0,0 +1,12 @@ +package raft + +import ( + "testing" +) + +func TestInmemTransportImpl(t *testing.T) { + var inm interface{} = &InmemTransport{} + if _, ok := inm.(Transport); !ok { + t.Fatalf("InmemTransport is not a Transport") + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go new file mode 100644 index 000000000..1d071e139 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go @@ -0,0 +1,266 @@ +package raft + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "testing" + "time" +) + +// CheckInteg will skip a test if integration testing is not enabled. +func CheckInteg(t *testing.T) { + if !IsInteg() { + t.SkipNow() + } +} + +// IsInteg returns a boolean telling you if we're in integ testing mode. +func IsInteg() bool { + return os.Getenv("INTEG_TESTS") != "" +} + +type RaftEnv struct { + dir string + conf *Config + fsm *MockFSM + store *InmemStore + snapshot *FileSnapshotStore + peers *JSONPeers + trans *NetworkTransport + raft *Raft +} + +func (r *RaftEnv) Release() { + log.Printf("[WARN] Release node at %v", r.raft.localAddr) + f := r.raft.Shutdown() + if err := f.Error(); err != nil { + panic(err) + } + r.trans.Close() + os.RemoveAll(r.dir) +} + +func MakeRaft(t *testing.T, conf *Config) *RaftEnv { + env := &RaftEnv{} + + // Set the config + if conf == nil { + conf = inmemConfig() + } + env.conf = conf + + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + env.dir = dir + + stable := NewInmemStore() + env.store = stable + + snap, err := NewFileSnapshotStore(dir, 3, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + env.snapshot = snap + + env.fsm = &MockFSM{} + + trans, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + env.trans = trans + + env.peers = NewJSONPeers(dir, trans) + + log.Printf("[INFO] Starting node at %v", trans.LocalAddr()) + raft, err := NewRaft(conf, env.fsm, stable, stable, snap, env.peers, trans) + if err != nil { + t.Fatalf("err: %v", err) + } + env.raft = raft + return env +} + +func WaitFor(env *RaftEnv, state RaftState) error { + limit := time.Now().Add(200 * time.Millisecond) + for env.raft.State() != state { + if time.Now().Before(limit) { + time.Sleep(10 * time.Millisecond) + } else { + return fmt.Errorf("failed to transition 
to state %v", state) + } + } + return nil +} + +func WaitForAny(state RaftState, envs []*RaftEnv) (*RaftEnv, error) { + limit := time.Now().Add(200 * time.Millisecond) +CHECK: + for _, env := range envs { + if env.raft.State() == state { + return env, nil + } + } + if time.Now().Before(limit) { + goto WAIT + } + return nil, fmt.Errorf("failed to find node in %v state", state) +WAIT: + time.Sleep(10 * time.Millisecond) + goto CHECK +} + +func WaitFuture(f Future, t *testing.T) error { + timer := time.AfterFunc(200*time.Millisecond, func() { + panic(fmt.Errorf("timeout waiting for future %v", f)) + }) + defer timer.Stop() + return f.Error() +} + +func NoErr(err error, t *testing.T) { + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func CheckConsistent(envs []*RaftEnv, t *testing.T) { + limit := time.Now().Add(400 * time.Millisecond) + first := envs[0] + var err error +CHECK: + l1 := len(first.fsm.logs) + for i := 1; i < len(envs); i++ { + env := envs[i] + l2 := len(env.fsm.logs) + if l1 != l2 { + err = fmt.Errorf("log length mismatch %d %d", l1, l2) + goto ERR + } + for idx, log := range first.fsm.logs { + other := env.fsm.logs[idx] + if bytes.Compare(log, other) != 0 { + err = fmt.Errorf("log %d mismatch %v %v", idx, log, other) + goto ERR + } + } + } + return +ERR: + if time.Now().After(limit) { + t.Fatalf("%v", err) + } + time.Sleep(20 * time.Millisecond) + goto CHECK +} + +// Tests Raft by creating a cluster, growing it to 5 nodes while +// causing various stressful conditions +func TestRaft_Integ(t *testing.T) { + CheckInteg(t) + conf := DefaultConfig() + conf.HeartbeatTimeout = 50 * time.Millisecond + conf.ElectionTimeout = 50 * time.Millisecond + conf.LeaderLeaseTimeout = 50 * time.Millisecond + conf.CommitTimeout = 5 * time.Millisecond + conf.SnapshotThreshold = 100 + conf.TrailingLogs = 10 + conf.EnableSingleNode = true + + // Create a single node + env1 := MakeRaft(t, conf) + NoErr(WaitFor(env1, Leader), t) + + // Do some commits + var futures 
[]Future + for i := 0; i < 100; i++ { + futures = append(futures, env1.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) + } + for _, f := range futures { + NoErr(WaitFuture(f, t), t) + log.Printf("[DEBUG] Applied %v", f) + } + + // Do a snapshot + NoErr(WaitFuture(env1.raft.Snapshot(), t), t) + + // Join a few nodes! + var envs []*RaftEnv + for i := 0; i < 4; i++ { + env := MakeRaft(t, conf) + addr := env.trans.LocalAddr() + NoErr(WaitFuture(env1.raft.AddPeer(addr), t), t) + envs = append(envs, env) + } + + // Wait for a leader + leader, err := WaitForAny(Leader, append([]*RaftEnv{env1}, envs...)) + NoErr(err, t) + + // Do some more commits + futures = nil + for i := 0; i < 100; i++ { + futures = append(futures, leader.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) + } + for _, f := range futures { + NoErr(WaitFuture(f, t), t) + log.Printf("[DEBUG] Applied %v", f) + } + + // Shoot two nodes in the head! + rm1, rm2 := envs[0], envs[1] + rm1.Release() + rm2.Release() + envs = envs[2:] + time.Sleep(10 * time.Millisecond) + + // Wait for a leader + leader, err = WaitForAny(Leader, append([]*RaftEnv{env1}, envs...)) + NoErr(err, t) + + // Do some more commits + futures = nil + for i := 0; i < 100; i++ { + futures = append(futures, leader.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) + } + for _, f := range futures { + NoErr(WaitFuture(f, t), t) + log.Printf("[DEBUG] Applied %v", f) + } + + // Join a few new nodes! + for i := 0; i < 2; i++ { + env := MakeRaft(t, conf) + addr := env.trans.LocalAddr() + NoErr(WaitFuture(leader.raft.AddPeer(addr), t), t) + envs = append(envs, env) + } + + // Remove the old nodes + NoErr(WaitFuture(leader.raft.RemovePeer(rm1.raft.localAddr), t), t) + NoErr(WaitFuture(leader.raft.RemovePeer(rm2.raft.localAddr), t), t) + + // Shoot the leader + env1.Release() + time.Sleep(3 * conf.HeartbeatTimeout) + + // Wait for a leader + leader, err = WaitForAny(Leader, envs) + NoErr(err, t) + + allEnvs := append([]*RaftEnv{env1}, envs...) 
+ CheckConsistent(allEnvs, t) + + if len(env1.fsm.logs) != 300 { + t.Fatalf("should apply 300 logs! %d", len(env1.fsm.logs)) + } + + for _, e := range envs { + e.Release() + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log.go new file mode 100644 index 000000000..a8c5a40ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/log.go @@ -0,0 +1,60 @@ +package raft + +// LogType describes various types of log entries. +type LogType uint8 + +const ( + // LogCommand is applied to a user FSM. + LogCommand LogType = iota + + // LogNoop is used to assert leadership. + LogNoop + + // LogAddPeer is used to add a new peer. + LogAddPeer + + // LogRemovePeer is used to remove an existing peer. + LogRemovePeer + + // LogBarrier is used to ensure all preceding operations have been + // applied to the FSM. It is similar to LogNoop, but instead of returning + // once committed, it only returns once the FSM manager acks it. Otherwise + // it is possible there are operations committed but not yet applied to + // the FSM. + LogBarrier +) + +// Log entries are replicated to all members of the Raft cluster +// and form the heart of the replicated state machine. +type Log struct { + Index uint64 + Term uint64 + Type LogType + Data []byte + + // peer is not exported since it is not transmitted, only used + // internally to construct the Data field. + peer string +} + +// LogStore is used to provide an interface for storing +// and retrieving logs in a durable fashion. +type LogStore interface { + // Returns the first index written. 0 for no entries. + FirstIndex() (uint64, error) + + // Returns the last index written. 0 for no entries. + LastIndex() (uint64, error) + + // Gets a log entry at a given index. + GetLog(index uint64, log *Log) error + + // Stores a log entry. + StoreLog(log *Log) error + + // Stores multiple log entries. 
+ StoreLogs(logs []*Log) error + + // Deletes a range of log entries. The range is inclusive. + DeleteRange(min, max uint64) error +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go new file mode 100644 index 000000000..952e98c22 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go @@ -0,0 +1,79 @@ +package raft + +import ( + "fmt" + "sync" +) + +// LogCache wraps any LogStore implementation to provide an +// in-memory ring buffer. This is used to cache access to +// the recently written entries. For implementations that do not +// cache themselves, this can provide a substantial boost by +// avoiding disk I/O on recent entries. +type LogCache struct { + store LogStore + + cache []*Log + l sync.RWMutex +} + +// NewLogCache is used to create a new LogCache with the +// given capacity and backend store. +func NewLogCache(capacity int, store LogStore) (*LogCache, error) { + if capacity <= 0 { + return nil, fmt.Errorf("capacity must be positive") + } + c := &LogCache{ + store: store, + cache: make([]*Log, capacity), + } + return c, nil +} + +func (c *LogCache) GetLog(idx uint64, log *Log) error { + // Check the buffer for an entry + c.l.RLock() + cached := c.cache[idx%uint64(len(c.cache))] + c.l.RUnlock() + + // Check if entry is valid + if cached != nil && cached.Index == idx { + *log = *cached + return nil + } + + // Forward request on cache miss + return c.store.GetLog(idx, log) +} + +func (c *LogCache) StoreLog(log *Log) error { + return c.StoreLogs([]*Log{log}) +} + +func (c *LogCache) StoreLogs(logs []*Log) error { + // Insert the logs into the ring buffer + c.l.Lock() + for _, l := range logs { + c.cache[l.Index%uint64(len(c.cache))] = l + } + c.l.Unlock() + + return c.store.StoreLogs(logs) +} + +func (c *LogCache) FirstIndex() (uint64, error) { + return c.store.FirstIndex() +} + +func (c *LogCache) LastIndex() (uint64, error) { + return 
c.store.LastIndex() +} + +func (c *LogCache) DeleteRange(min, max uint64) error { + // Invalidate the cache on deletes + c.l.Lock() + c.cache = make([]*Log, len(c.cache)) + c.l.Unlock() + + return c.store.DeleteRange(min, max) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go new file mode 100644 index 000000000..7569e78ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go @@ -0,0 +1,88 @@ +package raft + +import ( + "testing" +) + +func TestLogCache(t *testing.T) { + store := NewInmemStore() + c, _ := NewLogCache(16, store) + + // Insert into the in-mem store + for i := 0; i < 32; i++ { + log := &Log{Index: uint64(i) + 1} + store.StoreLog(log) + } + + // Check the indexes + if idx, _ := c.FirstIndex(); idx != 1 { + t.Fatalf("bad: %d", idx) + } + if idx, _ := c.LastIndex(); idx != 32 { + t.Fatalf("bad: %d", idx) + } + + // Try get log with a miss + var out Log + err := c.GetLog(1, &out) + if err != nil { + t.Fatalf("err: %v", err) + } + if out.Index != 1 { + t.Fatalf("bad: %#v", out) + } + + // Store logs + l1 := &Log{Index: 33} + l2 := &Log{Index: 34} + err = c.StoreLogs([]*Log{l1, l2}) + if err != nil { + t.Fatalf("err: %v", err) + } + + if idx, _ := c.LastIndex(); idx != 34 { + t.Fatalf("bad: %d", idx) + } + + // Check that it wrote-through + err = store.GetLog(33, &out) + if err != nil { + t.Fatalf("err: %v", err) + } + err = store.GetLog(34, &out) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Delete in the backend + err = store.DeleteRange(33, 34) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should be in the ring buffer + err = c.GetLog(33, &out) + if err != nil { + t.Fatalf("err: %v", err) + } + err = c.GetLog(34, &out) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Purge the ring buffer + err = c.DeleteRange(33, 34) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should not be in the ring 
buffer + err = c.GetLog(33, &out) + if err != ErrLogNotFound { + t.Fatalf("err: %v", err) + } + err = c.GetLog(34, &out) + if err != ErrLogNotFound { + t.Fatalf("err: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 000000000..3f3ed31fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,606 @@ +package raft + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "sync" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. + DefaultTimeoutScale = 256 * 1024 // 256KB + + // rpcMaxPipeline controls the maximum number of outstanding + // AppendEntries RPC calls. + rpcMaxPipeline = 128 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +/* + +NetworkTransport provides a network based transport that can be +used to communicate with Raft on remote machines. It requires +an underlying stream layer to provide a stream abstraction, which can +be simple TCP, TLS, etc. + +This transport is very simple and lightweight. Each RPC request is +framed by sending a byte that indicates the message type, followed +by the MsgPack encoded request. + +The response is an error string followed by the response object, +both are encoded using MsgPack. + +InstallSnapshot is special, in that after the RPC request we stream +the entire state. That socket is not re-used as the connection state +is not known if there is an error. 
+ +*/ +type NetworkTransport struct { + connPool map[string][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger *log.Logger + + maxPool int + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + timeout time.Duration + TimeoutScale int +} + +// StreamLayer is used with the NetworkTransport to provide +// the low level stream abstraction. +type StreamLayer interface { + net.Listener + + // Dial is used to create a new outgoing connection + Dial(address string, timeout time.Duration) (net.Conn, error) +} + +type netConn struct { + target string + conn net.Conn + r *bufio.Reader + w *bufio.Writer + dec *codec.Decoder + enc *codec.Encoder +} + +func (n *netConn) Release() error { + return n.conn.Close() +} + +type netPipeline struct { + conn *netConn + trans *NetworkTransport + + doneCh chan AppendFuture + inprogressCh chan *appendFuture + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// NewNetworkTransport creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransport( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) *NetworkTransport { + if logOutput == nil { + logOutput = os.Stderr + } + trans := &NetworkTransport{ + connPool: make(map[string][]*netConn), + consumeCh: make(chan RPC), + logger: log.New(logOutput, "", log.LstdFlags), + maxPool: maxPool, + shutdownCh: make(chan struct{}), + stream: stream, + timeout: timeout, + TimeoutScale: DefaultTimeoutScale, + } + go trans.listen() + return trans +} + +// SetHeartbeatHandler is used to setup a heartbeat handler +// as a fast-pass. This is to avoid head-of-line blocking from +// disk IO. 
+func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { + n.heartbeatFnLock.Lock() + defer n.heartbeatFnLock.Unlock() + n.heartbeatFn = cb +} + +// Close is used to stop the network transport. +func (n *NetworkTransport) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + + if !n.shutdown { + close(n.shutdownCh) + n.stream.Close() + n.shutdown = true + } + return nil +} + +// Consumer implements the Transport interface. +func (n *NetworkTransport) Consumer() <-chan RPC { + return n.consumeCh +} + +// LocalAddr implements the Transport interface. +func (n *NetworkTransport) LocalAddr() string { + return n.stream.Addr().String() +} + +// IsShutdown is used to check if the transport is shutdown. +func (n *NetworkTransport) IsShutdown() bool { + select { + case <-n.shutdownCh: + return true + default: + return false + } +} + +// getExistingConn is used to grab a pooled connection. +func (n *NetworkTransport) getPooledConn(target string) *netConn { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + conns, ok := n.connPool[target] + if !ok || len(conns) == 0 { + return nil + } + + var conn *netConn + num := len(conns) + conn, conns[num-1] = conns[num-1], nil + n.connPool[target] = conns[:num-1] + return conn +} + +// getConn is used to get a connection from the pool. 
+func (n *NetworkTransport) getConn(target string) (*netConn, error) { + // Check for a pooled conn + if conn := n.getPooledConn(target); conn != nil { + return conn, nil + } + + // Dial a new connection + conn, err := n.stream.Dial(target, n.timeout) + if err != nil { + return nil, err + } + + // Wrap the conn + netConn := &netConn{ + target: target, + conn: conn, + r: bufio.NewReader(conn), + w: bufio.NewWriter(conn), + } + + // Setup encoder/decoders + netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) + netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) + + // Done + return netConn, nil +} + +// returnConn returns a connection back to the pool. +func (n *NetworkTransport) returnConn(conn *netConn) { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + key := conn.target + conns, _ := n.connPool[key] + + if !n.IsShutdown() && len(conns) < n.maxPool { + n.connPool[key] = append(conns, conn) + } else { + conn.Release() + } +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (n *NetworkTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { + // Get a connection + conn, err := n.getConn(target) + if err != nil { + return nil, err + } + + // Create the pipeline + return newNetPipeline(n, conn), nil +} + +// AppendEntries implements the Transport interface. +func (n *NetworkTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + return n.genericRPC(target, rpcAppendEntries, args, resp) +} + +// RequestVote implements the Transport interface. +func (n *NetworkTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { + return n.genericRPC(target, rpcRequestVote, args, resp) +} + +// genericRPC handles a simple request/response RPC. 
+func (n *NetworkTransport) genericRPC(target string, rpcType uint8, args interface{}, resp interface{}) error { + // Get a conn + conn, err := n.getConn(target) + if err != nil { + return err + } + + // Set a deadline + if n.timeout > 0 { + conn.conn.SetDeadline(time.Now().Add(n.timeout)) + } + + // Send the RPC + if err := sendRPC(conn, rpcType, args); err != nil { + return err + } + + // Decode the response + canReturn, err := decodeResponse(conn, resp) + if canReturn { + n.returnConn(conn) + } + return err +} + +// InstallSnapshot implements the Transport interface. +func (n *NetworkTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + // Get a conn, always close for InstallSnapshot + conn, err := n.getConn(target) + if err != nil { + return err + } + defer conn.Release() + + // Set a deadline, scaled by request size + if n.timeout > 0 { + timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) + if timeout < n.timeout { + timeout = n.timeout + } + conn.conn.SetDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(conn, rpcInstallSnapshot, args); err != nil { + return err + } + + // Stream the state + if _, err := io.Copy(conn.w, data); err != nil { + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + return err + } + + // Decode the response, do not return conn + _, err = decodeResponse(conn, resp) + return err +} + +// EncodePeer implements the Transport interface. +func (n *NetworkTransport) EncodePeer(p string) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (n *NetworkTransport) DecodePeer(buf []byte) string { + return string(buf) +} + +// listen is used to handling incoming connections. 
+func (n *NetworkTransport) listen() { + for { + // Accept incoming connections + conn, err := n.stream.Accept() + if err != nil { + if n.IsShutdown() { + return + } + n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) + continue + } + n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) + + // Handle the connection in dedicated routine + go n.handleConn(conn) + } +} + +// handleConn is used to handle an inbound connection for its lifespan. +func (n *NetworkTransport) handleConn(conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) + + for { + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command. 
+func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + // Check if this is a heartbeat + if req.Term != 0 && req.Leader != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // Wait for response +RESP: + select { + case resp := <-respCh: + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. 
+func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. +func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { + // Write the request type + if err := conn.w.WriteByte(rpcType); err != nil { + conn.Release() + return err + } + + // Send the request + if err := conn.enc.Encode(args); err != nil { + conn.Release() + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + conn.Release() + return err + } + return nil +} + +// newNetPipeline is used to construct a netPipeline from a given +// transport and connection. +func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { + n := &netPipeline{ + conn: conn, + trans: trans, + doneCh: make(chan AppendFuture, rpcMaxPipeline), + inprogressCh: make(chan *appendFuture, rpcMaxPipeline), + shutdownCh: make(chan struct{}), + } + go n.decodeResponses() + return n +} + +// decodeResponses is a long running routine that decodes the responses +// sent on the connection. +func (n *netPipeline) decodeResponses() { + timeout := n.trans.timeout + for { + select { + case future := <-n.inprogressCh: + if timeout > 0 { + n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + + _, err := decodeResponse(n.conn, future.resp) + future.respond(err) + select { + case n.doneCh <- future: + case <-n.shutdownCh: + return + } + case <-n.shutdownCh: + return + } + } +} + +// AppendEntries is used to pipeline a new append entries request. 
+func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Add a send timeout + if timeout := n.trans.timeout; timeout > 0 { + n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { + return nil, err + } + + // Hand-off for decoding, this can also cause back-pressure + // to prevent too many inflight requests + select { + case n.inprogressCh <- future: + return future, nil + case <-n.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +// Consumer returns a channel that can be used to consume complete futures. +func (n *netPipeline) Consumer() <-chan AppendFuture { + return n.doneCh +} + +// Closed is used to shutdown the pipeline connection. +func (n *netPipeline) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + if n.shutdown { + return nil + } + + // Release the connection + n.conn.Release() + + n.shutdown = true + close(n.shutdownCh) + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go new file mode 100644 index 000000000..0127ac55e --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go @@ -0,0 +1,449 @@ +package raft + +import ( + "bytes" + "reflect" + "sync" + "testing" + "time" +) + +func TestNetworkTransport_StartStop(t *testing.T) { + trans, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + trans.Close() +} + +func TestNetworkTransport_Heartbeat_FastPath(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer 
trans1.Close() + + // Make the RPC request + args := AppendEntriesRequest{ + Term: 10, + Leader: []byte("cartman"), + } + resp := AppendEntriesResponse{ + Term: 4, + LastLog: 90, + Success: true, + } + + invoked := false + fastpath := func(rpc RPC) { + // Verify the command + req := rpc.Command.(*AppendEntriesRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + + rpc.Respond(&resp, nil) + invoked = true + } + trans1.SetHeartbeatHandler(fastpath) + + // Transport 2 makes outbound request + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + var out AppendEntriesResponse + if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the response + if !reflect.DeepEqual(resp, out) { + t.Fatalf("command mismatch: %#v %#v", resp, out) + } + + // Ensure fast-path is used + if !invoked { + t.Fatalf("fast-path not used") + } +} + +func TestNetworkTransport_AppendEntries(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + rpcCh := trans1.Consumer() + + // Make the RPC request + args := AppendEntriesRequest{ + Term: 10, + Leader: []byte("cartman"), + PrevLogEntry: 100, + PrevLogTerm: 4, + Entries: []*Log{ + &Log{ + Index: 101, + Term: 4, + Type: LogNoop, + }, + }, + LeaderCommitIndex: 90, + } + resp := AppendEntriesResponse{ + Term: 4, + LastLog: 90, + Success: true, + } + + // Listen for a request + go func() { + select { + case rpc := <-rpcCh: + // Verify the command + req := rpc.Command.(*AppendEntriesRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + + rpc.Respond(&resp, nil) + + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout") + } + }() + + // Transport 2 
makes outbound request + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + var out AppendEntriesResponse + if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the response + if !reflect.DeepEqual(resp, out) { + t.Fatalf("command mismatch: %#v %#v", resp, out) + } +} + +func TestNetworkTransport_AppendEntriesPipeline(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + rpcCh := trans1.Consumer() + + // Make the RPC request + args := AppendEntriesRequest{ + Term: 10, + Leader: []byte("cartman"), + PrevLogEntry: 100, + PrevLogTerm: 4, + Entries: []*Log{ + &Log{ + Index: 101, + Term: 4, + Type: LogNoop, + }, + }, + LeaderCommitIndex: 90, + } + resp := AppendEntriesResponse{ + Term: 4, + LastLog: 90, + Success: true, + } + + // Listen for a request + go func() { + for i := 0; i < 10; i++ { + select { + case rpc := <-rpcCh: + // Verify the command + req := rpc.Command.(*AppendEntriesRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + rpc.Respond(&resp, nil) + + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout") + } + } + }() + + // Transport 2 makes outbound request + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + pipeline, err := trans2.AppendEntriesPipeline(trans1.LocalAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + defer pipeline.Close() + for i := 0; i < 10; i++ { + out := new(AppendEntriesResponse) + if _, err := pipeline.AppendEntries(&args, out); err != nil { + t.Fatalf("err: %v", err) + } + } + + respCh := pipeline.Consumer() + for i := 0; i < 10; i++ { + select { + case ready := 
<-respCh: + // Verify the response + if !reflect.DeepEqual(&resp, ready.Response()) { + t.Fatalf("command mismatch: %#v %#v", &resp, ready.Response()) + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout") + } + } +} + +func TestNetworkTransport_RequestVote(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + rpcCh := trans1.Consumer() + + // Make the RPC request + args := RequestVoteRequest{ + Term: 20, + Candidate: []byte("butters"), + LastLogIndex: 100, + LastLogTerm: 19, + } + resp := RequestVoteResponse{ + Term: 100, + Peers: []byte("blah"), + Granted: false, + } + + // Listen for a request + go func() { + select { + case rpc := <-rpcCh: + // Verify the command + req := rpc.Command.(*RequestVoteRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + + rpc.Respond(&resp, nil) + + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout") + } + }() + + // Transport 2 makes outbound request + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + var out RequestVoteResponse + if err := trans2.RequestVote(trans1.LocalAddr(), &args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the response + if !reflect.DeepEqual(resp, out) { + t.Fatalf("command mismatch: %#v %#v", resp, out) + } +} + +func TestNetworkTransport_InstallSnapshot(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + rpcCh := trans1.Consumer() + + // Make the RPC request + args := InstallSnapshotRequest{ + Term: 10, + Leader: []byte("kyle"), + LastLogIndex: 100, + LastLogTerm: 9, + Peers: []byte("blah blah"), + Size: 10, + } + resp := 
InstallSnapshotResponse{ + Term: 10, + Success: true, + } + + // Listen for a request + go func() { + select { + case rpc := <-rpcCh: + // Verify the command + req := rpc.Command.(*InstallSnapshotRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + + // Try to read the bytes + buf := make([]byte, 10) + rpc.Reader.Read(buf) + + // Compare + if bytes.Compare(buf, []byte("0123456789")) != 0 { + t.Fatalf("bad buf %v", buf) + } + + rpc.Respond(&resp, nil) + + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout") + } + }() + + // Transport 2 makes outbound request + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + // Create a buffer + buf := bytes.NewBuffer([]byte("0123456789")) + + var out InstallSnapshotResponse + if err := trans2.InstallSnapshot(trans1.LocalAddr(), &args, &out, buf); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the response + if !reflect.DeepEqual(resp, out) { + t.Fatalf("command mismatch: %#v %#v", resp, out) + } +} + +func TestNetworkTransport_EncodeDecode(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + + local := trans1.LocalAddr() + enc := trans1.EncodePeer(local) + dec := trans1.DecodePeer(enc) + + if dec != local { + t.Fatalf("enc/dec fail: %v %v", dec, local) + } +} + +func TestNetworkTransport_PooledConn(t *testing.T) { + // Transport 1 is consumer + trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans1.Close() + rpcCh := trans1.Consumer() + + // Make the RPC request + args := AppendEntriesRequest{ + Term: 10, + Leader: []byte("cartman"), + PrevLogEntry: 100, + PrevLogTerm: 4, + Entries: []*Log{ + &Log{ + Index: 101, + Term: 4, + Type: LogNoop, + }, 
+ }, + LeaderCommitIndex: 90, + } + resp := AppendEntriesResponse{ + Term: 4, + LastLog: 90, + Success: true, + } + + // Listen for a request + go func() { + for { + select { + case rpc := <-rpcCh: + // Verify the command + req := rpc.Command.(*AppendEntriesRequest) + if !reflect.DeepEqual(req, &args) { + t.Fatalf("command mismatch: %#v %#v", *req, args) + } + rpc.Respond(&resp, nil) + + case <-time.After(200 * time.Millisecond): + return + } + } + }() + + // Transport 2 makes outbound request, 3 conn pool + trans2, err := NewTCPTransport("127.0.0.1:0", nil, 3, time.Second, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer trans2.Close() + + // Create wait group + wg := &sync.WaitGroup{} + wg.Add(5) + + appendFunc := func() { + defer wg.Done() + var out AppendEntriesResponse + if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the response + if !reflect.DeepEqual(resp, out) { + t.Fatalf("command mismatch: %#v %#v", resp, out) + } + } + + // Try to do parallel appends, should stress the conn pool + for i := 0; i < 5; i++ { + go appendFunc() + } + + // Wait for the routines to finish + wg.Wait() + + // Check the conn pool size + addr := trans1.LocalAddr() + if len(trans2.connPool[addr]) != 3 { + t.Fatalf("Expected 2 pooled conns!") + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go b/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go new file mode 100644 index 000000000..6f3bcf856 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go @@ -0,0 +1,122 @@ +package raft + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +const ( + jsonPeerPath = "peers.json" +) + +// PeerStore provides an interface for persistent storage and +// retrieval of peers. We use a separate interface than StableStore +// since the peers may need to be edited by a human operator. 
For example, +// in a two node cluster, the failure of either node requires human intervention +// since consensus is impossible. +type PeerStore interface { + // Peers returns the list of known peers. + Peers() ([]string, error) + + // SetPeers sets the list of known peers. This is invoked when a peer is + // added or removed. + SetPeers([]string) error +} + +// StaticPeers is used to provide a static list of peers. +type StaticPeers struct { + StaticPeers []string + l sync.Mutex +} + +// Peers implements the PeerStore interface. +func (s *StaticPeers) Peers() ([]string, error) { + s.l.Lock() + peers := s.StaticPeers + s.l.Unlock() + return peers, nil +} + +// SetPeers implements the PeerStore interface. +func (s *StaticPeers) SetPeers(p []string) error { + s.l.Lock() + s.StaticPeers = p + s.l.Unlock() + return nil +} + +// JSONPeers is used to provide peer persistence on disk in the form +// of a JSON file. This allows human operators to manipulate the file. +type JSONPeers struct { + l sync.Mutex + path string + trans Transport +} + +// NewJSONPeers creates a new JSONPeers store. Requires a transport +// to handle the serialization of network addresses. +func NewJSONPeers(base string, trans Transport) *JSONPeers { + path := filepath.Join(base, jsonPeerPath) + store := &JSONPeers{ + path: path, + trans: trans, + } + return store +} + +// Peers implements the PeerStore interface. 
+func (j *JSONPeers) Peers() ([]string, error) { + j.l.Lock() + defer j.l.Unlock() + + // Read the file + buf, err := ioutil.ReadFile(j.path) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + // Check for no peers + if len(buf) == 0 { + return nil, nil + } + + // Decode the peers + var peerSet []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peerSet); err != nil { + return nil, err + } + + // Deserialize each peer + var peers []string + for _, p := range peerSet { + peers = append(peers, j.trans.DecodePeer([]byte(p))) + } + return peers, nil +} + +// SetPeers implements the PeerStore interface. +func (j *JSONPeers) SetPeers(peers []string) error { + j.l.Lock() + defer j.l.Unlock() + + // Encode each peer + var peerSet []string + for _, p := range peers { + peerSet = append(peerSet, string(j.trans.EncodePeer(p))) + } + + // Convert to JSON + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(peerSet); err != nil { + return err + } + + // Write out as JSON + return ioutil.WriteFile(j.path, buf.Bytes(), 0755) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go new file mode 100644 index 000000000..1cb1159e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go @@ -0,0 +1,44 @@ +package raft + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestJSONPeers(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + // Create the store + _, trans := NewInmemTransport() + store := NewJSONPeers(dir, trans) + + // Try a read, should get nothing + peers, err := store.Peers() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(peers) != 0 { + t.Fatalf("peers: %v", peers) + } + + // Initialize some peers + newPeers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} + if err 
:= store.SetPeers(newPeers); err != nil { + t.Fatalf("err: %v", err) + } + + // Try a read, should peers + peers, err = store.Peers() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(peers) != 3 { + t.Fatalf("peers: %v", peers) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go b/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go new file mode 100644 index 000000000..2fdc6d796 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go @@ -0,0 +1,1781 @@ +package raft + +import ( + "bytes" + "errors" + "fmt" + "io" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") + + // ErrLeader is returned when an operation can't be completed on a + // leader node. + ErrLeader = errors.New("node is the leader") + + // ErrNotLeader is returned when an operation can't be completed on a + // follower or candidate node. + ErrNotLeader = errors.New("node is not the leader") + + // ErrLeadershipLost is returned when a leader fails to commit a log entry + // because it's been deposed in the process. + ErrLeadershipLost = errors.New("leadership lost while committing log") + + // ErrRaftShutdown is returned when operations are requested against an + // inactive Raft. + ErrRaftShutdown = errors.New("raft is already shutdown") + + // ErrEnqueueTimeout is returned when a command fails due to a timeout. + ErrEnqueueTimeout = errors.New("timed out enqueuing operation") + + // ErrKnownPeer is returned when trying to add a peer to the configuration + // that already exists. + ErrKnownPeer = errors.New("peer already known") + + // ErrUnknownPeer is returned when trying to remove a peer from the + // configuration that doesn't exist. 
+ ErrUnknownPeer = errors.New("peer is unknown") +) + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader. +type leaderState struct { + commitCh chan struct{} + inflight *inflight + replState map[string]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// Raft implements a Raft node. +type Raft struct { + raftState + + // applyCh is used to async send logs to the main thread to + // be committed and applied to the FSM. + applyCh chan *logFuture + + // Configuration provided at Raft initialization + conf *Config + + // FSM is the client state machine to apply commands to + fsm FSM + + // fsmCommitCh is used to trigger async application of logs to the fsm + fsmCommitCh chan commitTuple + + // fsmRestoreCh is used to trigger a restore from snapshot + fsmRestoreCh chan *restoreFuture + + // fsmSnapshotCh is used to trigger a new snapshot being taken + fsmSnapshotCh chan *reqSnapshotFuture + + // lastContact is the last time we had contact from the + // leader node. This can be used to gauge staleness. 
+ lastContact time.Time + lastContactLock sync.RWMutex + + // Leader is the current cluster leader + leader string + leaderLock sync.RWMutex + + // leaderCh is used to notify of leadership changes + leaderCh chan bool + + // leaderState used only while state is leader + leaderState leaderState + + // Stores our local addr + localAddr string + + // Used for our logging + logger *log.Logger + + // LogStore provides durable storage for logs + logs LogStore + + // Track our known peers + peerCh chan *peerFuture + peers []string + peerStore PeerStore + + // RPC chan comes from the transport layer + rpcCh <-chan RPC + + // Shutdown channel to exit, protected to prevent concurrent exits + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + // snapshots is used to store and retrieve snapshots + snapshots SnapshotStore + + // snapshotCh is used for user triggered snapshots + snapshotCh chan *snapshotFuture + + // stable is a StableStore implementation for durable state + // It provides stable storage for many fields in raftState + stable StableStore + + // The transport layer we use + trans Transport + + // verifyCh is used to async send verify futures to the main thread + // to verify we are still the leader + verifyCh chan *verifyFuture +} + +// NewRaft is used to construct a new Raft node. It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any old state, +// such as snapshots, logs, peers, etc, all those will be restored when creating the +// Raft node. 
+func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, + peerStore PeerStore, trans Transport) (*Raft, error) { + // Validate the configuration + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput + var logger *log.Logger + if conf.Logger != nil { + logger = conf.Logger + } else { + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + logger = log.New(conf.LogOutput, "", log.LstdFlags) + } + + // Try to restore the current term + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the last log value + lastIdx, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the log + var lastLog Log + if lastIdx > 0 { + if err := logs.GetLog(lastIdx, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log: %v", err) + } + } + + // Construct the list of peers that excludes us + localAddr := trans.LocalAddr() + peers, err := peerStore.Peers() + if err != nil { + return nil, fmt.Errorf("failed to get list of peers: %v", err) + } + peers = ExcludePeer(peers, localAddr) + + // Create Raft struct + r := &Raft{ + applyCh: make(chan *logFuture), + conf: conf, + fsm: fsm, + fsmCommitCh: make(chan commitTuple, 128), + fsmRestoreCh: make(chan *restoreFuture), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localAddr: localAddr, + logger: logger, + logs: logs, + peerCh: make(chan *peerFuture), + peers: peers, + peerStore: peerStore, + rpcCh: trans.Consumer(), + snapshots: snaps, + snapshotCh: make(chan *snapshotFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + } + + // Initialize as a follower + r.setState(Follower) + + // Restore the current term and the last log + 
r.setCurrentTerm(currentTerm) + r.setLastLogIndex(lastLog.Index) + r.setLastLogTerm(lastLog.Term) + + // Attempt to restore a snapshot if there are any + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Setup a heartbeat fast-path to avoid head-of-line + // blocking where possible. It MUST be safe for this + // to be called concurrently with a blocking RPC. + trans.SetHeartbeatHandler(r.processHeartbeat) + + // Start the background work + r.goFunc(r.run) + r.goFunc(r.runFSM) + r.goFunc(r.runSnapshots) + return r, nil +} + +// Leader is used to return the current leader of the cluster. +// It may return empty string if there is no current leader +// or the leader is unknown. +func (r *Raft) Leader() string { + r.leaderLock.RLock() + leader := r.leader + r.leaderLock.RUnlock() + return leader +} + +// setLeader is used to modify the current leader of the cluster +func (r *Raft) setLeader(leader string) { + r.leaderLock.Lock() + r.leader = leader + r.leaderLock.Unlock() +} + +// Apply is used to apply a command to the FSM in a highly consistent +// manner. This returns a future that can be used to wait on the application. +// An optional timeout can be provided to limit the amount of time we wait +// for the command to be started. This must be run on the leader or it +// will fail. +func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture { + metrics.IncrCounter([]string{"raft", "apply"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogCommand, + Data: cmd, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// Barrier is used to issue a command that blocks until all preceeding +// operations have been applied to the FSM. 
It can be used to ensure the +// FSM reflects all queued writes. An optional timeout can be provided to +// limit the amount of time we wait for the command to be started. This +// must be run on the leader or it will fail. +func (r *Raft) Barrier(timeout time.Duration) Future { + metrics.IncrCounter([]string{"raft", "barrier"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogBarrier, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// VerifyLeader is used to ensure the current node is still +// the leader. This can be done to prevent stale reads when a +// new leader has potentially been elected. +func (r *Raft) VerifyLeader() Future { + metrics.IncrCounter([]string{"raft", "verify_leader"}, 1) + verifyFuture := &verifyFuture{} + verifyFuture.init() + select { + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.verifyCh <- verifyFuture: + return verifyFuture + } +} + +// AddPeer is used to add a new peer into the cluster. This must be +// run on the leader or it will fail. +func (r *Raft) AddPeer(peer string) Future { + logFuture := &logFuture{ + log: Log{ + Type: LogAddPeer, + peer: peer, + }, + } + logFuture.init() + select { + case r.applyCh <- logFuture: + return logFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// RemovePeer is used to remove a peer from the cluster. If the +// current leader is being removed, it will cause a new election +// to occur. This must be run on the leader or it will fail. 
+func (r *Raft) RemovePeer(peer string) Future { + logFuture := &logFuture{ + log: Log{ + Type: LogRemovePeer, + peer: peer, + }, + } + logFuture.init() + select { + case r.applyCh <- logFuture: + return logFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// SetPeers is used to forcibly replace the set of internal peers and +// the peerstore with the ones specified. This can be considered unsafe. +func (r *Raft) SetPeers(p []string) Future { + peerFuture := &peerFuture{ + peers: p, + } + peerFuture.init() + + select { + case r.peerCh <- peerFuture: + return peerFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// Shutdown is used to stop the Raft background routines. +// This is not a graceful operation. Provides a future that +// can be used to block until all background routines have exited. +func (r *Raft) Shutdown() Future { + r.shutdownLock.Lock() + defer r.shutdownLock.Unlock() + + if !r.shutdown { + close(r.shutdownCh) + r.shutdown = true + r.setState(Shutdown) + } + + return &shutdownFuture{r} +} + +// Snapshot is used to manually force Raft to take a snapshot. +// Returns a future that can be used to block until complete. +func (r *Raft) Snapshot() Future { + snapFuture := &snapshotFuture{} + snapFuture.init() + select { + case r.snapshotCh <- snapFuture: + return snapFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } + +} + +// State is used to return the current raft state. +func (r *Raft) State() RaftState { + return r.getState() +} + +// LeaderCh is used to get a channel which delivers signals on +// acquiring or losing leadership. It sends true if we become +// the leader, and false if we lose it. The channel is not buffered, +// and does not block on writes. 
+func (r *Raft) LeaderCh() <-chan bool { + return r.leaderCh +} + +func (r *Raft) String() string { + return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) +} + +// LastContact returns the time of last contact by a leader. +// This only makes sense if we are currently a follower. +func (r *Raft) LastContact() time.Time { + r.lastContactLock.RLock() + last := r.lastContact + r.lastContactLock.RUnlock() + return last +} + +// Stats is used to return a map of various internal stats. This should only +// be used for informative purposes or debugging. +func (r *Raft) Stats() map[string]string { + toString := func(v uint64) string { + return strconv.FormatUint(v, 10) + } + s := map[string]string{ + "state": r.getState().String(), + "term": toString(r.getCurrentTerm()), + "last_log_index": toString(r.getLastLogIndex()), + "last_log_term": toString(r.getLastLogTerm()), + "commit_index": toString(r.getCommitIndex()), + "applied_index": toString(r.getLastApplied()), + "fsm_pending": toString(uint64(len(r.fsmCommitCh))), + "last_snapshot_index": toString(r.getLastSnapshotIndex()), + "last_snapshot_term": toString(r.getLastSnapshotTerm()), + "num_peers": toString(uint64(len(r.peers))), + } + last := r.LastContact() + if last.IsZero() { + s["last_contact"] = "never" + } else if r.getState() == Leader { + s["last_contact"] = "0" + } else { + s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last)) + } + return s +} + +// LastIndex returns the last index in stable storage, +// either from the last log or from the last snapshot. +func (r *Raft) LastIndex() uint64 { + return r.getLastIndex() +} + +// AppliedIndex returns the last index applied to the FSM. +// This is generally lagging behind the last index, especially +// for indexes that are persisted but have not yet been considered +// committed by the leader. +func (r *Raft) AppliedIndex() uint64 { + return r.getLastApplied() +} + +// runFSM is a long running goroutine responsible for applying logs +// to the FSM. 
This is done async of other logs since we don't want +// the FSM to block our internal operations. +func (r *Raft) runFSM() { + var lastIndex, lastTerm uint64 + for { + select { + case req := <-r.fsmRestoreCh: + // Open the snapshot + meta, source, err := r.snapshots.Open(req.ID) + if err != nil { + req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) + continue + } + + // Attempt to restore + start := time.Now() + if err := r.fsm.Restore(source); err != nil { + req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) + source.Close() + continue + } + source.Close() + metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) + + // Update the last index and term + lastIndex = meta.Index + lastTerm = meta.Term + req.respond(nil) + + case req := <-r.fsmSnapshotCh: + // Get our peers + peers, err := r.peerStore.Peers() + if err != nil { + req.respond(err) + } + + // Start a snapshot + start := time.Now() + snap, err := r.fsm.Snapshot() + metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) + + // Respond to the request + req.index = lastIndex + req.term = lastTerm + req.peers = peers + req.snapshot = snap + req.respond(err) + + case commitTuple := <-r.fsmCommitCh: + // Apply the log if a command + var resp interface{} + if commitTuple.log.Type == LogCommand { + start := time.Now() + resp = r.fsm.Apply(commitTuple.log) + metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) + } + + // Update the indexes + lastIndex = commitTuple.log.Index + lastTerm = commitTuple.log.Term + + // Invoke the future if given + if commitTuple.future != nil { + commitTuple.future.response = resp + commitTuple.future.respond(nil) + } + case <-r.shutdownCh: + return + } + } +} + +// run is a long running goroutine that runs the Raft FSM. 
+func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("") + return + default: + } + + // Enter into a sub-FSM + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the FSM for a follower. +func (r *Raft) runFollower() { + didWarn := false + r.logger.Printf("[INFO] raft: %v entering Follower state", r) + heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) + for { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case p := <-r.peerCh: + // Set the peers + r.peers = ExcludePeer(p.peers, r.localAddr) + p.respond(r.peerStore.SetPeers(p.peers)) + + case <-heartbeatTimer: + // Restart the heartbeat timer + heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { + continue + } + + // Heartbeat failed! Transition to the candidate state + r.setLeader("") + if len(r.peers) == 0 && !r.conf.EnableSingleNode { + if !didWarn { + r.logger.Printf("[WARN] raft: EnableSingleNode disabled, and no known peers. Aborting election.") + didWarn = true + } + } else { + r.logger.Printf("[WARN] raft: Heartbeat timeout reached, starting election") + r.setState(Candidate) + return + } + + case <-r.shutdownCh: + return + } + } +} + +// runCandidate runs the FSM for a candidate. 
+func (r *Raft) runCandidate() { + r.logger.Printf("[INFO] raft: %v entering Candidate state", r) + + // Start vote for us, and set a timeout + voteCh := r.electSelf() + electionTimer := randomTimeout(r.conf.ElectionTimeout) + + // Tally the votes, need a simple majority + grantedVotes := 0 + votesNeeded := r.quorumSize() + r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded) + + for r.getState() == Candidate { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case vote := <-voteCh: + // Check if the term is greater than ours, bail + if vote.Term > r.getCurrentTerm() { + r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower") + r.setState(Follower) + r.setCurrentTerm(vote.Term) + return + } + + // Check if the vote is granted + if vote.Granted { + grantedVotes++ + r.logger.Printf("[DEBUG] raft: Vote granted. Tally: %d", grantedVotes) + } + + // Check if we've become the leader + if grantedVotes >= votesNeeded { + r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) + r.setState(Leader) + r.setLeader(r.localAddr) + return + } + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case p := <-r.peerCh: + // Set the peers + r.peers = ExcludePeer(p.peers, r.localAddr) + p.respond(r.peerStore.SetPeers(p.peers)) + // Become a follower again + r.setState(Follower) + return + + case <-electionTimer: + // Election failed! Restart the election. We simply return, + // which will kick us back into runCandidate + r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") + return + + case <-r.shutdownCh: + return + } + } +} + +// runLeader runs the FSM for a leader. Do the setup here and drop into +// the leaderLoop for the hot loop. 
+func (r *Raft) runLeader() { + r.logger.Printf("[INFO] raft: %v entering Leader state", r) + + // Notify that we are the leader + asyncNotifyBool(r.leaderCh, true) + + // Setup leader state + r.leaderState.commitCh = make(chan struct{}, 1) + r.leaderState.inflight = newInflight(r.leaderState.commitCh) + r.leaderState.replState = make(map[string]*followerReplication) + r.leaderState.notify = make(map[*verifyFuture]struct{}) + r.leaderState.stepDown = make(chan struct{}, 1) + + // Cleanup state on step down + defer func() { + // Stop replication + for _, p := range r.leaderState.replState { + close(p.stopCh) + } + + // Cancel inflight requests + r.leaderState.inflight.Cancel(ErrLeadershipLost) + + // Respond to any pending verify requests + for future := range r.leaderState.notify { + future.respond(ErrLeadershipLost) + } + + // Clear all the state + r.leaderState.commitCh = nil + r.leaderState.inflight = nil + r.leaderState.replState = nil + r.leaderState.notify = nil + r.leaderState.stepDown = nil + + // If we are stepping down for some reason, no known leader. + // We may have stepped down due to an RPC call, which would + // provide the leader, so we cannot always blank this out. + r.leaderLock.Lock() + if r.leader == r.localAddr { + r.leader = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + asyncNotifyBool(r.leaderCh, false) + }() + + // Start a replication routine for each peer + for _, peer := range r.peers { + r.startReplication(peer) + } + + // Dispatch a no-op log first. Instead of LogNoop, + // we use a LogAddPeer with our peerset. This acts like + // a no-op as well, but when doing an initial bootstrap, ensures + // that all nodes share a common peerset. + peerSet := append([]string{r.localAddr}, r.peers...) + noop := &logFuture{ + log: Log{ + Type: LogAddPeer, + Data: encodePeers(peerSet, r.trans), + }, + } + r.dispatchLogs([]*logFuture{noop}) + + // Disable EnableSingleNode after we've been elected leader. 
+ // This is to prevent a split brain in the future, if we are removed + // from the cluster and then elect ourself as leader. + if r.conf.DisableBootstrapAfterElect && r.conf.EnableSingleNode { + r.logger.Printf("[INFO] raft: Disabling EnableSingleNode (bootstrap)") + r.conf.EnableSingleNode = false + } + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startReplication is a helper to setup state and start async replication to a peer. +func (r *Raft) startReplication(peer string) { + lastIdx := r.getLastIndex() + s := &followerReplication{ + peer: peer, + inflight: r.leaderState.inflight, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + currentTerm: r.getCurrentTerm(), + matchIndex: 0, + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + r.leaderState.replState[peer] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. 
+func (r *Raft) leaderLoop() { + lease := time.After(r.conf.LeaderLeaseTimeout) + for r.getState() == Leader { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case <-r.leaderState.stepDown: + r.setState(Follower) + + case <-r.leaderState.commitCh: + // Get the committed messages + committed := r.leaderState.inflight.Committed() + for e := committed.Front(); e != nil; e = e.Next() { + // Measure the commit time + commitLog := e.Value.(*logFuture) + metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) + + // Increment the commit index + idx := commitLog.log.Index + r.setCommitIndex(idx) + r.processLogs(idx, commitLog) + } + + case v := <-r.verifyCh: + if v.quorumSize == 0 { + // Just dispatched, start the verification + r.verifyLeader(v) + + } else if v.votes < v.quorumSize { + // Early return, means there must be a new leader + r.logger.Printf("[WARN] raft: New leader elected, stepping down") + r.setState(Follower) + delete(r.leaderState.notify, v) + v.respond(ErrNotLeader) + + } else { + // Quorum of members agree, we are still leader + delete(r.leaderState.notify, v) + v.respond(nil) + } + + case p := <-r.peerCh: + p.respond(ErrLeader) + + case newLog := <-r.applyCh: + // Group commit, gather all the ready commits + ready := []*logFuture{newLog} + for i := 0; i < r.conf.MaxAppendEntries; i++ { + select { + case newLog := <-r.applyCh: + ready = append(ready, newLog) + default: + break + } + } + + // Handle any peer set changes + n := len(ready) + for i := 0; i < n; i++ { + // Special case AddPeer and RemovePeer + log := ready[i] + if log.log.Type != LogAddPeer && log.log.Type != LogRemovePeer { + continue + } + + // Check if this log should be ignored + if !r.preparePeerChange(log) { + ready[i], ready[n-1] = ready[n-1], nil + n-- + i-- + continue + } + + // Apply peer set changes early + r.processLog(&log.log, nil, true) + } + + // Nothing to do if all logs are invalid + if n == 0 { + continue + } + + // Dispatch the logs + ready = 
ready[:n] + r.dispatchLogs(ready) + + case <-lease: + // Check if we've exceeded the lease, potentially stepping down + maxDiff := r.checkLeaderLease() + + // Next check interval should adjust for the last node we've + // contacted, without going negative + checkInterval := r.conf.LeaderLeaseTimeout - maxDiff + if checkInterval < minCheckInterval { + checkInterval = minCheckInterval + } + + // Renew the lease timer + lease = time.After(checkInterval) + + case <-r.shutdownCh: + return + } + } +} + +// verifyLeader must be called from the main thread for safety. +// Causes the followers to attempt an immediate heartbeat. +func (r *Raft) verifyLeader(v *verifyFuture) { + // Current leader always votes for self + v.votes = 1 + + // Set the quorum size, hot-path for single node + v.quorumSize = r.quorumSize() + if v.quorumSize == 1 { + v.respond(nil) + return + } + + // Track this request + v.notifyCh = r.verifyCh + r.leaderState.notify[v] = struct{}{} + + // Trigger immediate heartbeats + for _, repl := range r.leaderState.replState { + repl.notifyLock.Lock() + repl.notify = append(repl.notify, v) + repl.notifyLock.Unlock() + asyncNotifyCh(repl.notifyCh) + } +} + +// checkLeaderLease is used to check if we can contact a quorum of nodes +// within the last leader lease interval. If not, we need to step down, +// as we may have lost connectivity. Returns the maximum duration without +// contact. +func (r *Raft) checkLeaderLease() time.Duration { + // Track contacted nodes, we can always contact ourself + contacted := 1 + + // Check each follower + var maxDiff time.Duration + now := time.Now() + for peer, f := range r.leaderState.replState { + diff := now.Sub(f.LastContact()) + if diff <= r.conf.LeaderLeaseTimeout { + contacted++ + if diff > maxDiff { + maxDiff = diff + } + } else { + // Log at least once at high value, then debug. Otherwise it gets very verbose. 
+ if diff <= 3*r.conf.LeaderLeaseTimeout { + r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) + } else { + r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) + } + } + metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) + } + + // Verify we can contact a quorum + quorum := r.quorumSize() + if contacted < quorum { + r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") + r.setState(Follower) + } + return maxDiff +} + +// quorumSize is used to return the quorum size +func (r *Raft) quorumSize() int { + return ((len(r.peers) + 1) / 2) + 1 +} + +// preparePeerChange checks if a LogAddPeer or LogRemovePeer should be performed, +// and properly formats the data field on the log before dispatching it. +func (r *Raft) preparePeerChange(l *logFuture) bool { + // Check if this is a known peer + p := l.log.peer + knownPeer := PeerContained(r.peers, p) || r.localAddr == p + + // Ignore known peers on add + if l.log.Type == LogAddPeer && knownPeer { + l.respond(ErrKnownPeer) + return false + } + + // Ignore unknown peers on remove + if l.log.Type == LogRemovePeer && !knownPeer { + l.respond(ErrUnknownPeer) + return false + } + + // Construct the peer set + var peerSet []string + if l.log.Type == LogAddPeer { + peerSet = append([]string{p, r.localAddr}, r.peers...) + } else { + peerSet = ExcludePeer(append([]string{r.localAddr}, r.peers...), p) + } + + // Setup the log + l.log.Data = encodePeers(peerSet, r.trans) + return true +} + +// dispatchLog is called to push a log to disk, mark it +// as inflight and begin replication of it. 
+func (r *Raft) dispatchLogs(applyLogs []*logFuture) { + now := time.Now() + defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) + + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + logs := make([]*Log, len(applyLogs)) + + for idx, applyLog := range applyLogs { + applyLog.dispatch = now + applyLog.log.Index = lastIndex + uint64(idx) + 1 + applyLog.log.Term = term + applyLog.policy = newMajorityQuorum(len(r.peers) + 1) + logs[idx] = &applyLog.log + } + + // Write the log entry locally + if err := r.logs.StoreLogs(logs); err != nil { + r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) + for _, applyLog := range applyLogs { + applyLog.respond(err) + } + r.setState(Follower) + return + } + + // Add this to the inflight logs, commit + r.leaderState.inflight.StartAll(applyLogs) + + // Update the last log since it's on disk now + r.setLastLogIndex(lastIndex + uint64(len(applyLogs))) + r.setLastLogTerm(term) + + // Notify the replicators of the new log + for _, f := range r.leaderState.replState { + asyncNotifyCh(f.triggerCh) + } +} + +// processLogs is used to process all the logs from the lastApplied +// up to the given index. 
+func (r *Raft) processLogs(index uint64, future *logFuture) { + // Reject logs we've applied already + lastApplied := r.getLastApplied() + if index <= lastApplied { + r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) + return + } + + // Apply all the preceding logs + for idx := r.getLastApplied() + 1; idx <= index; idx++ { + // Get the log, either from the future or from our log store + if future != nil && future.log.Index == idx { + r.processLog(&future.log, future, false) + + } else { + l := new(Log) + if err := r.logs.GetLog(idx, l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) + panic(err) + } + r.processLog(l, nil, false) + } + + // Update the lastApplied index and term + r.setLastApplied(idx) + } +} + +// processLog is invoked to process the application of a single committed log. +func (r *Raft) processLog(l *Log, future *logFuture, precommit bool) { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + // Forward to the fsm handler + select { + case r.fsmCommitCh <- commitTuple{l, future}: + case <-r.shutdownCh: + if future != nil { + future.respond(ErrRaftShutdown) + } + } + + // Return so that the future is only responded to + // by the FSM handler when the application is done + return + + case LogAddPeer: + fallthrough + case LogRemovePeer: + peers := decodePeers(l.Data, r.trans) + r.logger.Printf("[DEBUG] raft: Node %v updated peer set (%v): %v", r.localAddr, l.Type, peers) + + // If the peer set does not include us, remove all other peers + removeSelf := !PeerContained(peers, r.localAddr) && l.Type == LogRemovePeer + if removeSelf { + r.peers = nil + r.peerStore.SetPeers([]string{r.localAddr}) + } else { + r.peers = ExcludePeer(peers, r.localAddr) + r.peerStore.SetPeers(peers) + } + + // Handle replication if we are the leader + if r.getState() == Leader { + for _, p := range r.peers { + if _, ok := r.leaderState.replState[p]; !ok { + 
r.logger.Printf("[INFO] raft: Added peer %v, starting replication", p) + r.startReplication(p) + } + } + } + + // Stop replication for old nodes + if r.getState() == Leader && !precommit { + var toDelete []string + for _, repl := range r.leaderState.replState { + if !PeerContained(r.peers, repl.peer) { + r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication (Index: %d)", repl.peer, l.Index) + + // Replicate up to this index and stop + repl.stopCh <- l.Index + close(repl.stopCh) + toDelete = append(toDelete, repl.peer) + } + } + for _, name := range toDelete { + delete(r.leaderState.replState, name) + } + } + + // Handle removing ourself + if removeSelf && !precommit { + if r.conf.ShutdownOnRemove { + r.logger.Printf("[INFO] raft: Removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case LogNoop: + // Ignore the no-op + default: + r.logger.Printf("[ERR] raft: Got unrecognized log type: %#v", l) + } + + // Invoke the future if given + if future != nil && !precommit { + future.respond(nil) + } +} + +// processRPC is called to handle an incoming RPC request. +func (r *Raft) processRPC(rpc RPC) { + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. 
+func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. +func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) + // Setup a response + resp := &AppendEntriesResponse{ + Term: r.getCurrentTerm(), + LastLog: r.getLastIndex(), + Success: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if a.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one, also transition to follower + // if we ever get an appendEntries call + if a.Term > r.getCurrentTerm() || r.getState() != Follower { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(a.Term) + resp.Term = a.Term + } + + // Save the current leader + r.setLeader(r.trans.DecodePeer(a.Leader)) + + // Verify the last log entry + if a.PrevLogEntry > 0 { + lastIdx, lastTerm := r.getLastEntry() + + var prevLogTerm uint64 + if a.PrevLogEntry == lastIdx { + prevLogTerm = lastTerm + + } else { + var prevLog Log + if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { + r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", + a.PrevLogEntry, err, lastIdx) + return + } + prevLogTerm = prevLog.Term + } + + if a.PrevLogTerm != prevLogTerm { + r.logger.Printf("[WARN] raft: Previous log term mis-match: ours: %d remote: %d", + prevLogTerm, a.PrevLogTerm) + 
return + } + } + + // Process any new entries + if n := len(a.Entries); n > 0 { + start := time.Now() + first := a.Entries[0] + last := a.Entries[n-1] + + // Delete any conflicting entries + lastLogIdx := r.getLastLogIndex() + if first.Index <= lastLogIdx { + r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", first.Index, lastLogIdx) + if err := r.logs.DeleteRange(first.Index, lastLogIdx); err != nil { + r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) + return + } + } + + // Append the entry + if err := r.logs.StoreLogs(a.Entries); err != nil { + r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) + return + } + + // Update the lastLog + r.setLastLogIndex(last.Index) + r.setLastLogTerm(last.Term) + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) + } + + // Update the commit index + if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { + start := time.Now() + idx := min(a.LeaderCommitIndex, r.getLastIndex()) + r.setCommitIndex(idx) + r.processLogs(idx, nil) + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) + } + + // Everything went well, set success + resp.Success = true + r.lastContactLock.Lock() + r.lastContact = time.Now() + r.lastContactLock.Unlock() + return +} + +// requestVote is invoked when we get an request vote RPC call. 
+func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + // Setup a response + resp := &RequestVoteResponse{ + Term: r.getCurrentTerm(), + Peers: encodePeers(r.peers, r.trans), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Check if we have an existing leader + if leader := r.Leader(); leader != "" { + r.logger.Printf("[WARN] raft: Rejecting vote from %v since we have a leader: %v", + r.trans.DecodePeer(req.Candidate), leader) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Check if we have voted yet + lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) + return + } + lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) + return + } + + // Check if we've voted in this election before + if lastVoteTerm == req.Term && lastVoteCandBytes != nil { + r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) + if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { + r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) + resp.Granted = true + } + return + } + + // Reject if their term is older + lastIdx, lastTerm := r.getLastEntry() + if lastTerm > req.LastLogTerm { + r.logger.Printf("[WARN] raft: Rejecting vote from %v since our last term is greater (%d, %d)", + r.trans.DecodePeer(req.Candidate), lastTerm, req.LastLogTerm) + return + } + + if lastIdx > req.LastLogIndex { + 
r.logger.Printf("[WARN] raft: Rejecting vote from %v since our last index is greater (%d, %d)", + r.trans.DecodePeer(req.Candidate), lastIdx, req.LastLogIndex) + return + } + + // Persist a vote for safety + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) + return + } + + resp.Granted = true + return +} + +// installSnapshot is invoked when we get a InstallSnapshot RPC call. +// We must be in the follower state for this, since it means we are +// too far behind a leader for log replay. +func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) + // Setup a response + resp := &InstallSnapshotResponse{ + Term: r.getCurrentTerm(), + Success: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Save the current leader + r.setLeader(r.trans.DecodePeer(req.Leader)) + + // Create a new snapshot + sink, err := r.snapshots.Create(req.LastLogIndex, req.LastLogTerm, req.Peers) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) + rpcErr = fmt.Errorf("failed to create snapshot: %v", err) + return + } + + // Spill the remote snapshot to disk + n, err := io.Copy(sink, rpc.Reader) + if err != nil { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) + rpcErr = err + return + } + + // Check that we received it all + if n != req.Size { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) + rpcErr = fmt.Errorf("short read") + return + } + + // Finalize the snapshot + if err := 
sink.Close(); err != nil { + r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) + rpcErr = err + return + } + r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + + // Restore snapshot + future := &restoreFuture{ID: sink.ID()} + future.init() + select { + case r.fsmRestoreCh <- future: + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return + } + + // Wait for the restore to happen + if err := future.Error(); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) + rpcErr = err + return + } + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(req.LastLogIndex) + + // Update the last stable snapshot info + r.setLastSnapshotIndex(req.LastLogIndex) + r.setLastSnapshotTerm(req.LastLogTerm) + + // Restore the peer set + peers := decodePeers(req.Peers, r.trans) + r.peers = ExcludePeer(peers, r.localAddr) + r.peerStore.SetPeers(peers) + + // Compact logs, continue even if this fails + if err := r.compactLogs(req.LastLogIndex); err != nil { + r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) + } + + r.logger.Printf("[INFO] raft: Installed remote snapshot") + resp.Success = true + r.lastContactLock.Lock() + r.lastContact = time.Now() + r.lastContactLock.Unlock() + return +} + +// electSelf is used to send a RequestVote RPC to all peers, +// and vote for ourself. This has the side affecting of incrementing +// the current term. The response channel returned is used to wait +// for all the responses (including a vote for ourself). 
+func (r *Raft) electSelf() <-chan *RequestVoteResponse { + // Create a response channel + respCh := make(chan *RequestVoteResponse, len(r.peers)+1) + + // Increment the term + r.setCurrentTerm(r.getCurrentTerm() + 1) + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestVoteRequest{ + Term: r.getCurrentTerm(), + Candidate: r.trans.EncodePeer(r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer string) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) + resp := new(RequestVoteResponse) + err := r.trans.RequestVote(peer, req, resp) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) + resp.Term = req.Term + resp.Granted = false + } + + // If we are not a peer, we could have been removed but failed + // to receive the log message. OR it could mean an improperly configured + // cluster. Either way, we should warn + if err == nil { + peerSet := decodePeers(resp.Peers, r.trans) + if !PeerContained(peerSet, r.localAddr) { + r.logger.Printf("[WARN] raft: Remote peer %v does not have local node %v as a peer", + peer, r.localAddr) + } + } + + respCh <- resp + }) + } + + // For each peer, request a vote + for _, peer := range r.peers { + askPeer(peer) + } + + // Persist a vote for ourselves + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) + return nil + } + + // Include our own vote + respCh <- &RequestVoteResponse{ + Term: req.Term, + Granted: true, + } + return respCh +} + +// persistVote is used to persist our vote for safety. 
+func (r *Raft) persistVote(term uint64, candidate []byte) error { + if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { + return err + } + if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { + return err + } + return nil +} + +// setCurrentTerm is used to set the current term in a durable manner. +func (r *Raft) setCurrentTerm(t uint64) { + // Persist to disk first + if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { + panic(fmt.Errorf("failed to save current term: %v", err)) + } + r.raftState.setCurrentTerm(t) +} + +// setState is used to update the current state. Any state +// transition causes the known leader to be cleared. This means +// that leader should be set only after updating the state. +func (r *Raft) setState(state RaftState) { + r.setLeader("") + r.raftState.setState(state) +} + +// runSnapshots is a long running goroutine used to manage taking +// new snapshots of the FSM. It runs in parallel to the FSM and +// main goroutines, so that snapshots do not block normal operation. +func (r *Raft) runSnapshots() { + for { + select { + case <-randomTimeout(r.conf.SnapshotInterval): + // Check if we should snapshot + if !r.shouldSnapshot() { + continue + } + + // Trigger a snapshot + if err := r.takeSnapshot(); err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + + case future := <-r.snapshotCh: + // User-triggered, run immediately + err := r.takeSnapshot() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + future.respond(err) + + case <-r.shutdownCh: + return + } + } +} + +// shouldSnapshot checks if we meet the conditions to take +// a new snapshot. 
+func (r *Raft) shouldSnapshot() bool { + // Check the last snapshot index + lastSnap := r.getLastSnapshotIndex() + + // Check the last log index + lastIdx, err := r.logs.LastIndex() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) + return false + } + + // Compare the delta to the threshold + delta := lastIdx - lastSnap + return delta >= r.conf.SnapshotThreshold +} + +// takeSnapshot is used to take a new snapshot. +func (r *Raft) takeSnapshot() error { + defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) + // Create a snapshot request + req := &reqSnapshotFuture{} + req.init() + + // Wait for dispatch or shutdown + select { + case r.fsmSnapshotCh <- req: + case <-r.shutdownCh: + return ErrRaftShutdown + } + + // Wait until we get a response + if err := req.Error(); err != nil { + return fmt.Errorf("failed to start snapshot: %v", err) + } + defer req.snapshot.Release() + + // Log that we are starting the snapshot + r.logger.Printf("[INFO] raft: Starting snapshot up to %d", req.index) + + // Encode the peerset + peerSet := encodePeers(req.peers, r.trans) + + // Create a new snapshot + start := time.Now() + sink, err := r.snapshots.Create(req.index, req.term, peerSet) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) + + // Try to persist the snapshot + start = time.Now() + if err := req.snapshot.Persist(sink); err != nil { + sink.Cancel() + return fmt.Errorf("failed to persist snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) + + // Close and check for error + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to close snapshot: %v", err) + } + + // Update the last stable snapshot info + r.setLastSnapshotIndex(req.index) + r.setLastSnapshotTerm(req.term) + + // Compact the logs + if err := r.compactLogs(req.index); err != nil { + return 
err + } + + // Log completion + r.logger.Printf("[INFO] raft: Snapshot to %d complete", req.index) + return nil +} + +// compactLogs takes the last inclusive index of a snapshot +// and trims the logs that are no longer needed. +func (r *Raft) compactLogs(snapIdx uint64) error { + defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now()) + // Determine log ranges to compact + minLog, err := r.logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + + // Check if we have enough logs to truncate + if r.getLastLogIndex() <= r.conf.TrailingLogs { + return nil + } + + // Truncate up to the end of the snapshot, or `TrailingLogs` + // back from the head, which ever is further back. This ensures + // at least `TrailingLogs` entries, but does not allow logs + // after the snapshot to be removed. + maxLog := min(snapIdx, r.getLastLogIndex()-r.conf.TrailingLogs) + + // Log this + r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) + + // Compact the logs + if err := r.logs.DeleteRange(minLog, maxLog); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + return nil +} + +// restoreSnapshot attempts to restore the latest snapshots, and fails +// if none of them can be restored. This is called at initialization time, +// and is completely unsafe to call at any other time. 
+func (r *Raft) restoreSnapshot() error { + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return err + } + + // Try to load in order of newest to oldest + for _, snapshot := range snapshots { + _, source, err := r.snapshots.Open(snapshot.ID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) + continue + } + defer source.Close() + + if err := r.fsm.Restore(source); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) + continue + } + + // Log success + r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(snapshot.Index) + + // Update the last stable snapshot info + r.setLastSnapshotIndex(snapshot.Index) + r.setLastSnapshotTerm(snapshot.Term) + + // Success! + return nil + } + + // If we had snapshots and failed to load them, its an error + if len(snapshots) > 0 { + return fmt.Errorf("failed to load any existing snapshots") + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go new file mode 100644 index 000000000..284a5dd0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go @@ -0,0 +1,1454 @@ +package raft + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +// MockFSM is an implementation of the FSM interface, and just stores +// the logs sequentially. 
+type MockFSM struct { + sync.Mutex + logs [][]byte +} + +type MockSnapshot struct { + logs [][]byte + maxIndex int +} + +func (m *MockFSM) Apply(log *Log) interface{} { + m.Lock() + defer m.Unlock() + m.logs = append(m.logs, log.Data) + return len(m.logs) +} + +func (m *MockFSM) Snapshot() (FSMSnapshot, error) { + m.Lock() + defer m.Unlock() + return &MockSnapshot{m.logs, len(m.logs)}, nil +} + +func (m *MockFSM) Restore(inp io.ReadCloser) error { + m.Lock() + defer m.Unlock() + defer inp.Close() + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(inp, &hd) + + m.logs = nil + return dec.Decode(&m.logs) +} + +func (m *MockSnapshot) Persist(sink SnapshotSink) error { + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(sink, &hd) + if err := enc.Encode(m.logs[:m.maxIndex]); err != nil { + sink.Cancel() + return err + } + sink.Close() + return nil +} + +func (m *MockSnapshot) Release() { +} + +// Return configurations optimized for in-memory +func inmemConfig() *Config { + conf := DefaultConfig() + conf.HeartbeatTimeout = 50 * time.Millisecond + conf.ElectionTimeout = 50 * time.Millisecond + conf.LeaderLeaseTimeout = 50 * time.Millisecond + conf.CommitTimeout = time.Millisecond + return conf +} + +type cluster struct { + dirs []string + stores []*InmemStore + fsms []*MockFSM + snaps []*FileSnapshotStore + trans []*InmemTransport + rafts []*Raft +} + +func (c *cluster) Merge(other *cluster) { + c.dirs = append(c.dirs, other.dirs...) + c.stores = append(c.stores, other.stores...) + c.fsms = append(c.fsms, other.fsms...) + c.snaps = append(c.snaps, other.snaps...) + c.trans = append(c.trans, other.trans...) + c.rafts = append(c.rafts, other.rafts...) 
+} + +func (c *cluster) Close() { + var futures []Future + for _, r := range c.rafts { + futures = append(futures, r.Shutdown()) + } + + // Wait for shutdown + timer := time.AfterFunc(200*time.Millisecond, func() { + panic("timed out waiting for shutdown") + }) + + for _, f := range futures { + if err := f.Error(); err != nil { + panic(fmt.Errorf("shutdown future err: %v", err)) + } + } + timer.Stop() + + for _, d := range c.dirs { + os.RemoveAll(d) + } +} + +func (c *cluster) GetInState(s RaftState) []*Raft { + in := make([]*Raft, 0, 1) + for _, r := range c.rafts { + if r.State() == s { + in = append(in, r) + } + } + return in +} + +func (c *cluster) Leader() *Raft { + timeout := time.AfterFunc(400*time.Millisecond, func() { + panic("timeout waiting for leader") + }) + defer timeout.Stop() + + for len(c.GetInState(Leader)) < 1 { + time.Sleep(time.Millisecond) + } + leaders := c.GetInState(Leader) + if len(leaders) != 1 { + panic(fmt.Errorf("expected one leader: %v", leaders)) + } + return leaders[0] +} + +func (c *cluster) FullyConnect() { + log.Printf("[WARN] Fully Connecting") + for i, t1 := range c.trans { + for j, t2 := range c.trans { + if i != j { + t1.Connect(t2.LocalAddr(), t2) + t2.Connect(t1.LocalAddr(), t1) + } + } + } +} + +func (c *cluster) Disconnect(a string) { + log.Printf("[WARN] Disconnecting %v", a) + for _, t := range c.trans { + if t.localAddr == a { + t.DisconnectAll() + } else { + t.Disconnect(a) + } + } +} + +func (c *cluster) EnsureLeader(t *testing.T, expect string) { + limit := time.Now().Add(400 * time.Millisecond) +CHECK: + for _, r := range c.rafts { + leader := r.Leader() + if expect == "" { + if leader != "" { + if time.Now().After(limit) { + t.Fatalf("leader %v expected nil", leader) + } else { + goto WAIT + } + } + } else { + if leader == "" || leader != expect { + if time.Now().After(limit) { + t.Fatalf("leader %v expected %v", leader, expect) + } else { + goto WAIT + } + } + } + } + + return +WAIT: + time.Sleep(10 * 
time.Millisecond) + goto CHECK +} + +func (c *cluster) EnsureSame(t *testing.T) { + limit := time.Now().Add(400 * time.Millisecond) + first := c.fsms[0] + +CHECK: + first.Lock() + for i, fsm := range c.fsms { + if i == 0 { + continue + } + fsm.Lock() + + if len(first.logs) != len(fsm.logs) { + fsm.Unlock() + if time.Now().After(limit) { + t.Fatalf("length mismatch: %d %d", + len(first.logs), len(fsm.logs)) + } else { + goto WAIT + } + } + + for idx := 0; idx < len(first.logs); idx++ { + if bytes.Compare(first.logs[idx], fsm.logs[idx]) != 0 { + fsm.Unlock() + if time.Now().After(limit) { + t.Fatalf("log mismatch at index %d", idx) + } else { + goto WAIT + } + } + } + fsm.Unlock() + } + + first.Unlock() + return + +WAIT: + first.Unlock() + time.Sleep(20 * time.Millisecond) + goto CHECK +} + +func raftToPeerSet(r *Raft) map[string]struct{} { + peers := make(map[string]struct{}) + peers[r.localAddr] = struct{}{} + + raftPeers, _ := r.peerStore.Peers() + for _, p := range raftPeers { + peers[p] = struct{}{} + } + return peers +} + +func (c *cluster) EnsureSamePeers(t *testing.T) { + limit := time.Now().Add(400 * time.Millisecond) + peerSet := raftToPeerSet(c.rafts[0]) + +CHECK: + for i, raft := range c.rafts { + if i == 0 { + continue + } + + otherSet := raftToPeerSet(raft) + if !reflect.DeepEqual(peerSet, otherSet) { + if time.Now().After(limit) { + t.Fatalf("peer mismatch: %v %v", peerSet, otherSet) + } else { + goto WAIT + } + } + } + return + +WAIT: + time.Sleep(20 * time.Millisecond) + goto CHECK +} + +func MakeCluster(n int, t *testing.T, conf *Config) *cluster { + c := &cluster{} + peers := make([]string, 0, n) + + // Setup the stores and transports + for i := 0; i < n; i++ { + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + store := NewInmemStore() + c.dirs = append(c.dirs, dir) + c.stores = append(c.stores, store) + c.fsms = append(c.fsms, &MockFSM{}) + + dir2, snap := FileSnapTest(t) + c.dirs = append(c.dirs, dir2) + 
c.snaps = append(c.snaps, snap) + + addr, trans := NewInmemTransport() + c.trans = append(c.trans, trans) + peers = append(peers, addr) + } + + // Wire the transports together + c.FullyConnect() + + // Create all the rafts + for i := 0; i < n; i++ { + if conf == nil { + conf = inmemConfig() + } + if n == 1 { + conf.EnableSingleNode = true + } + + logs := c.stores[i] + store := c.stores[i] + snap := c.snaps[i] + trans := c.trans[i] + peerStore := &StaticPeers{StaticPeers: peers} + + raft, err := NewRaft(conf, c.fsms[i], logs, store, snap, peerStore, trans) + if err != nil { + t.Fatalf("err: %v", err) + } + c.rafts = append(c.rafts, raft) + } + + return c +} + +func MakeClusterNoPeers(n int, t *testing.T, conf *Config) *cluster { + c := &cluster{} + + // Setup the stores and transports + for i := 0; i < n; i++ { + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + store := NewInmemStore() + c.dirs = append(c.dirs, dir) + c.stores = append(c.stores, store) + c.fsms = append(c.fsms, &MockFSM{}) + + dir2, snap := FileSnapTest(t) + c.dirs = append(c.dirs, dir2) + c.snaps = append(c.snaps, snap) + + _, trans := NewInmemTransport() + c.trans = append(c.trans, trans) + } + + // Wire the transports together + c.FullyConnect() + + // Create all the rafts + for i := 0; i < n; i++ { + if conf == nil { + conf = inmemConfig() + } + + logs := c.stores[i] + store := c.stores[i] + snap := c.snaps[i] + trans := c.trans[i] + peerStore := &StaticPeers{} + + raft, err := NewRaft(conf, c.fsms[i], logs, store, snap, peerStore, trans) + if err != nil { + t.Fatalf("err: %v", err) + } + c.rafts = append(c.rafts, raft) + } + + return c +} + +func TestRaft_StartStop(t *testing.T) { + c := MakeCluster(1, t, nil) + c.Close() +} + +func TestRaft_AfterShutdown(t *testing.T) { + c := MakeCluster(1, t, nil) + c.Close() + raft := c.rafts[0] + + // Everything should fail now + if f := raft.Apply(nil, 0); f.Error() != ErrRaftShutdown { + t.Fatalf("should be 
shutdown: %v", f.Error()) + } + if f := raft.AddPeer(NewInmemAddr()); f.Error() != ErrRaftShutdown { + t.Fatalf("should be shutdown: %v", f.Error()) + } + if f := raft.RemovePeer(NewInmemAddr()); f.Error() != ErrRaftShutdown { + t.Fatalf("should be shutdown: %v", f.Error()) + } + if f := raft.Snapshot(); f.Error() != ErrRaftShutdown { + t.Fatalf("should be shutdown: %v", f.Error()) + } + + // Should be idempotent + raft.Shutdown() +} + +func TestRaft_SingleNode(t *testing.T) { + conf := inmemConfig() + c := MakeCluster(1, t, conf) + defer c.Close() + raft := c.rafts[0] + + // Watch leaderCh for change + select { + case v := <-raft.LeaderCh(): + if !v { + t.Fatalf("should become leader") + } + case <-time.After(conf.HeartbeatTimeout * 3): + t.Fatalf("timeout becoming leader") + } + + // Should be leader + if s := raft.State(); s != Leader { + t.Fatalf("expected leader: %v", s) + } + + // Should be able to apply + future := raft.Apply([]byte("test"), time.Millisecond) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Check the response + if future.Response().(int) != 1 { + t.Fatalf("bad response: %v", future.Response()) + } + + // Check the index + if idx := future.Index(); idx == 0 { + t.Fatalf("bad index: %d", idx) + } + + // Check that it is applied to the FSM + if len(c.fsms[0].logs) != 1 { + t.Fatalf("did not apply to FSM!") + } +} + +func TestRaft_TripleNode(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Should be one leader + leader := c.Leader() + c.EnsureLeader(t, leader.localAddr) + + // Should be able to apply + future := leader.Apply([]byte("test"), time.Millisecond) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for replication + time.Sleep(30 * time.Millisecond) + + // Check that it is applied to the FSM + for _, fsm := range c.fsms { + fsm.Lock() + num := len(fsm.logs) + fsm.Unlock() + if num != 1 { + t.Fatalf("did not apply to FSM!") + } + } 
+} + +func TestRaft_LeaderFail(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Should be one leader + leader := c.Leader() + + // Should be able to apply + future := leader.Apply([]byte("test"), time.Millisecond) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for replication + time.Sleep(30 * time.Millisecond) + + // Disconnect the leader now + log.Printf("[INFO] Disconnecting %v", leader) + c.Disconnect(leader.localAddr) + + // Wait for new leader + limit := time.Now().Add(200 * time.Millisecond) + var newLead *Raft + for time.Now().Before(limit) && newLead == nil { + time.Sleep(10 * time.Millisecond) + leaders := c.GetInState(Leader) + if len(leaders) == 1 && leaders[0] != leader { + newLead = leaders[0] + } + } + if newLead == nil { + t.Fatalf("expected new leader") + } + + // Ensure the term is greater + if newLead.getCurrentTerm() <= leader.getCurrentTerm() { + t.Fatalf("expected newer term! %d %d", newLead.getCurrentTerm(), leader.getCurrentTerm()) + } + + // Apply should work not work on old leader + future1 := leader.Apply([]byte("fail"), time.Millisecond) + + // Apply should work on newer leader + future2 := newLead.Apply([]byte("apply"), time.Millisecond) + + // Future2 should work + if err := future2.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Reconnect the networks + log.Printf("[INFO] Reconnecting %v", leader) + c.FullyConnect() + + // Future1 should fail + if err := future1.Error(); err != ErrLeadershipLost && err != ErrNotLeader { + t.Fatalf("err: %v", err) + } + + // Wait for log replication + c.EnsureSame(t) + + // Check two entries are applied to the FSM + for _, fsm := range c.fsms { + fsm.Lock() + if len(fsm.logs) != 2 { + t.Fatalf("did not apply both to FSM! 
%v", fsm.logs) + } + if bytes.Compare(fsm.logs[0], []byte("test")) != 0 { + t.Fatalf("first entry should be 'test'") + } + if bytes.Compare(fsm.logs[1], []byte("apply")) != 0 { + t.Fatalf("second entry should be 'apply'") + } + fsm.Unlock() + } +} + +func TestRaft_BehindFollower(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Disconnect one follower + leader := c.Leader() + followers := c.GetInState(Follower) + behind := followers[0] + c.Disconnect(behind.localAddr) + + // Commit a lot of things + var future Future + for i := 0; i < 100; i++ { + future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for the last future to apply + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } else { + log.Printf("[INFO] Finished apply without behind follower") + } + + // Check that we have a non zero last contact + if behind.LastContact().IsZero() { + t.Fatalf("expected previous contact") + } + + // Reconnect the behind node + c.FullyConnect() + + // Ensure all the logs are the same + c.EnsureSame(t) + + // Ensure one leader + leader = c.Leader() + c.EnsureLeader(t, leader.localAddr) +} + +func TestRaft_ApplyNonLeader(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Wait for a leader + c.Leader() + time.Sleep(10 * time.Millisecond) + + // Try to apply to them + followers := c.GetInState(Follower) + if len(followers) != 2 { + t.Fatalf("Expected 2 followers") + } + follower := followers[0] + + // Try to apply + future := follower.Apply([]byte("test"), time.Millisecond) + + if future.Error() != ErrNotLeader { + t.Fatalf("should not apply on follower") + } + + // Should be cached + if future.Error() != ErrNotLeader { + t.Fatalf("should not apply on follower") + } +} + +func TestRaft_ApplyConcurrent(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.HeartbeatTimeout = 80 * time.Millisecond + conf.ElectionTimeout = 80 * time.Millisecond + c 
:= MakeCluster(3, t, conf) + defer c.Close() + + // Wait for a leader + leader := c.Leader() + + // Create a wait group + var group sync.WaitGroup + group.Add(100) + + applyF := func(i int) { + defer group.Done() + future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Concurrently apply + for i := 0; i < 100; i++ { + go applyF(i) + } + + // Wait to finish + doneCh := make(chan struct{}) + go func() { + group.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + // Check the FSMs + c.EnsureSame(t) +} + +func TestRaft_ApplyConcurrent_Timeout(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.HeartbeatTimeout = 80 * time.Millisecond + conf.ElectionTimeout = 80 * time.Millisecond + c := MakeCluster(1, t, conf) + defer c.Close() + + // Wait for a leader + leader := c.Leader() + + // Enough enqueues should cause at least one timeout... 
+ var didTimeout int32 = 0 + for i := 0; i < 200; i++ { + go func(i int) { + future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), time.Microsecond) + if future.Error() == ErrEnqueueTimeout { + atomic.StoreInt32(&didTimeout, 1) + } + }(i) + } + + // Wait + time.Sleep(20 * time.Millisecond) + + // Some should have failed + if atomic.LoadInt32(&didTimeout) == 0 { + t.Fatalf("expected a timeout") + } +} + +func TestRaft_JoinNode(t *testing.T) { + // Make a cluster + c := MakeCluster(2, t, nil) + defer c.Close() + + // Apply a log to this cluster to ensure it is 'newer' + var future Future + leader := c.Leader() + future = leader.Apply([]byte("first"), 0) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } else { + log.Printf("[INFO] Applied log") + } + + // Make a new cluster of 1 + c1 := MakeCluster(1, t, nil) + + // Merge clusters + c.Merge(c1) + c.FullyConnect() + + // Wait until we have 2 leaders + limit := time.Now().Add(200 * time.Millisecond) + var leaders []*Raft + for time.Now().Before(limit) && len(leaders) != 2 { + time.Sleep(10 * time.Millisecond) + leaders = c.GetInState(Leader) + } + if len(leaders) != 2 { + t.Fatalf("expected two leader: %v", leaders) + } + + // Join the new node in + future = leader.AddPeer(c1.rafts[0].localAddr) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait until we have 2 followers + limit = time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected two followers: %v", followers) + } + + // Check the FSMs + c.EnsureSame(t) + + // Check the peers + c.EnsureSamePeers(t) + + // Ensure one leader + leader = c.Leader() + c.EnsureLeader(t, leader.localAddr) +} + +func TestRaft_RemoveFollower(t *testing.T) { + // Make a cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + 
leader := c.Leader() + + // Wait until we have 2 followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected two followers: %v", followers) + } + + // Remove a follower + follower := followers[0] + future := leader.RemovePeer(follower.localAddr) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait a while + time.Sleep(20 * time.Millisecond) + + // Other nodes should have fewer peers + if peers, _ := leader.peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers") + } + if peers, _ := followers[1].peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers") + } +} + +func TestRaft_RemoveLeader(t *testing.T) { + // Make a cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have 2 followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected two followers: %v", followers) + } + + // Remove the leader + leader.RemovePeer(leader.localAddr) + + // Wait a while + time.Sleep(20 * time.Millisecond) + + // Should have a new leader + newLeader := c.Leader() + + // Wait a bit for log application + time.Sleep(20 * time.Millisecond) + + // Other nodes should have fewer peers + if peers, _ := newLeader.peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers") + } + + // Old leader should be shutdown + if leader.State() != Shutdown { + t.Fatalf("leader should be shutdown") + } + + // Old leader should have no peers + if peers, _ := leader.peerStore.Peers(); len(peers) != 1 { + t.Fatalf("leader should have no peers") + } +} + +func 
TestRaft_RemoveLeader_NoShutdown(t *testing.T) { + // Make a cluster + conf := inmemConfig() + conf.ShutdownOnRemove = false + c := MakeCluster(3, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have 2 followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected two followers: %v", followers) + } + + // Remove the leader + leader.RemovePeer(leader.localAddr) + + // Wait a while + time.Sleep(20 * time.Millisecond) + + // Should have a new leader + newLeader := c.Leader() + + // Wait a bit for log application + time.Sleep(20 * time.Millisecond) + + // Other nodes should have fewer peers + if peers, _ := newLeader.peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers") + } + + // Old leader should be a follower + if leader.State() != Follower { + t.Fatalf("leader should be shutdown") + } + + // Old leader should have no peers + if peers, _ := leader.peerStore.Peers(); len(peers) != 1 { + t.Fatalf("leader should have no peers") + } +} + +func TestRaft_RemoveLeader_SplitCluster(t *testing.T) { + // Enable operation after a remove + conf := inmemConfig() + conf.EnableSingleNode = true + conf.ShutdownOnRemove = false + conf.DisableBootstrapAfterElect = false + + // Make a cluster + c := MakeCluster(3, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Remove the leader + leader.RemovePeer(leader.localAddr) + + // Wait until we have 2 leaders + limit := time.Now().Add(200 * time.Millisecond) + var leaders []*Raft + for time.Now().Before(limit) && len(leaders) != 2 { + time.Sleep(10 * time.Millisecond) + leaders = c.GetInState(Leader) + } + if len(leaders) != 2 { + t.Fatalf("expected two leader: %v", leaders) + } + + // Old leader should have no peers + if len(leader.peers) != 0 { + 
t.Fatalf("leader should have no peers") + } +} + +func TestRaft_AddKnownPeer(t *testing.T) { + // Make a cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + followers := c.GetInState(Follower) + + // Add a follower + future := leader.AddPeer(followers[0].localAddr) + + // Should be already added + if err := future.Error(); err != ErrKnownPeer { + t.Fatalf("err: %v", err) + } +} + +func TestRaft_RemoveUnknownPeer(t *testing.T) { + // Make a cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Remove unknown + future := leader.RemovePeer(NewInmemAddr()) + + // Should be already added + if err := future.Error(); err != ErrUnknownPeer { + t.Fatalf("err: %v", err) + } +} + +func TestRaft_SnapshotRestore(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.TrailingLogs = 10 + c := MakeCluster(1, t, conf) + defer c.Close() + + // Commit a lot of things + leader := c.Leader() + var future Future + for i := 0; i < 100; i++ { + future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for the last future to apply + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Take a snapshot + snapFuture := leader.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Check for snapshot + if snaps, _ := leader.snapshots.List(); len(snaps) != 1 { + t.Fatalf("should have a snapshot") + } + + // Logs should be trimmed + if idx, _ := leader.logs.FirstIndex(); idx != 92 { + t.Fatalf("should trim logs to 92: %d", idx) + } + + // Shutdown + shutdown := leader.Shutdown() + if err := shutdown.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Restart the Raft + r := leader + r, err := NewRaft(r.conf, r.fsm, r.logs, r.stable, + r.snapshots, r.peerStore, r.trans) + if err != nil { + t.Fatalf("err: %v", err) + } + c.rafts[0] = r + + // We should have restored from the snapshot! 
+ if last := r.getLastApplied(); last != 101 { + t.Fatalf("bad last: %v", last) + } +} + +func TestRaft_SnapshotRestore_PeerChange(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.TrailingLogs = 10 + c := MakeCluster(3, t, conf) + defer c.Close() + + // Commit a lot of things + leader := c.Leader() + var future Future + for i := 0; i < 100; i++ { + future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for the last future to apply + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Take a snapshot + snapFuture := leader.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Shutdown + shutdown := leader.Shutdown() + if err := shutdown.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Make a separate cluster + c2 := MakeClusterNoPeers(2, t, conf) + defer c2.Close() + + // Kill the old cluster + for _, sec := range c.rafts { + if sec != leader { + sec.Shutdown() + } + } + + // Change the peer addresses + peers := []string{leader.trans.LocalAddr()} + for _, sec := range c2.rafts { + peers = append(peers, sec.trans.LocalAddr()) + } + + // Restart the Raft with new peers + r := leader + peerStore := &StaticPeers{StaticPeers: peers} + r, err := NewRaft(r.conf, r.fsm, r.logs, r.stable, + r.snapshots, peerStore, r.trans) + if err != nil { + t.Fatalf("err: %v", err) + } + c.rafts[0] = r + c2.rafts = append(c2.rafts, r) + c2.trans = append(c2.trans, r.trans.(*InmemTransport)) + c2.fsms = append(c2.fsms, r.fsm.(*MockFSM)) + c2.FullyConnect() + + // Wait a while + time.Sleep(50 * time.Millisecond) + + // Ensure we elect a leader, and that we replicate + // to our new followers + c2.EnsureSame(t) + + // We should have restored from the snapshot! 
+ if last := r.getLastApplied(); last != 102 { + t.Fatalf("bad last: %v", last) + } +} + +func TestRaft_AutoSnapshot(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.SnapshotInterval = 5 * time.Millisecond + conf.SnapshotThreshold = 50 + conf.TrailingLogs = 10 + c := MakeCluster(1, t, conf) + defer c.Close() + + // Commit a lot of things + leader := c.Leader() + var future Future + for i := 0; i < 100; i++ { + future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for the last future to apply + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for a snapshot to happen + time.Sleep(50 * time.Millisecond) + + // Check for snapshot + if snaps, _ := leader.snapshots.List(); len(snaps) == 0 { + t.Fatalf("should have a snapshot") + } +} + +func TestRaft_SendSnapshotFollower(t *testing.T) { + // Make the cluster + conf := inmemConfig() + conf.TrailingLogs = 10 + c := MakeCluster(3, t, conf) + defer c.Close() + + // Disconnect one follower + followers := c.GetInState(Follower) + behind := followers[0] + c.Disconnect(behind.localAddr) + + // Commit a lot of things + leader := c.Leader() + var future Future + for i := 0; i < 100; i++ { + future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for the last future to apply + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } else { + log.Printf("[INFO] Finished apply without behind follower") + } + + // Snapshot, this will truncate logs! 
+ for _, r := range c.rafts { + future = r.Snapshot() + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Reconnect the behind node + c.FullyConnect() + + // Ensure all the logs are the same + c.EnsureSame(t) +} + +func TestRaft_ReJoinFollower(t *testing.T) { + // Enable operation after a remove + conf := inmemConfig() + conf.ShutdownOnRemove = false + + // Make a cluster + c := MakeCluster(3, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have 2 followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected two followers: %v", followers) + } + + // Remove a follower + follower := followers[0] + future := leader.RemovePeer(follower.localAddr) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait a while + time.Sleep(20 * time.Millisecond) + + // Other nodes should have fewer peers + if peers, _ := leader.peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers: %v", peers) + } + if peers, _ := followers[1].peerStore.Peers(); len(peers) != 2 { + t.Fatalf("too many peers: %v", peers) + } + + // Get the leader + time.Sleep(20 * time.Millisecond) + leader = c.Leader() + + // Rejoin. The follower will have a higher term than the leader, + // this will cause the leader to step down, and a new round of elections + // to take place. We should eventually re-stabilize. 
+ future = leader.AddPeer(follower.localAddr) + if err := future.Error(); err != nil && err != ErrLeadershipLost { + t.Fatalf("err: %v", err) + } + + // Wait a while + time.Sleep(40 * time.Millisecond) + + // Other nodes should have fewer peers + if peers, _ := leader.peerStore.Peers(); len(peers) != 3 { + t.Fatalf("missing peers: %v", peers) + } + if peers, _ := followers[1].peerStore.Peers(); len(peers) != 3 { + t.Fatalf("missing peers: %v", peers) + } + + // Should be a follower now + if follower.State() != Follower { + t.Fatalf("bad state: %v", follower.State()) + } +} + +func TestRaft_LeaderLeaseExpire(t *testing.T) { + // Make a cluster + conf := inmemConfig() + c := MakeCluster(2, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have a followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 1 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 1 { + t.Fatalf("expected a followers: %v", followers) + } + + // Disconnect the follower now + follower := followers[0] + log.Printf("[INFO] Disconnecting %v", follower) + c.Disconnect(follower.localAddr) + + // Watch the leaderCh + select { + case v := <-leader.LeaderCh(): + if v { + t.Fatalf("should step down as leader") + } + case <-time.After(conf.LeaderLeaseTimeout * 2): + t.Fatalf("timeout stepping down as leader") + } + + // Should be no leaders + if len(c.GetInState(Leader)) != 0 { + t.Fatalf("expected step down") + } + + // Verify no further contact + last := follower.LastContact() + time.Sleep(50 * time.Millisecond) + + // Check that last contact has not changed + if last != follower.LastContact() { + t.Fatalf("unexpected further contact") + } + + // Ensure both have cleared their leader + if l := leader.Leader(); l != "" { + t.Fatalf("bad: %v", l) + } + if l := follower.Leader(); l != "" { + t.Fatalf("bad: %v", l) + } +} + +func 
TestRaft_Barrier(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Commit a lot of things + for i := 0; i < 100; i++ { + leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) + } + + // Wait for a barrier complete + barrier := leader.Barrier(0) + + // Wait for the barrier future to apply + if err := barrier.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Ensure all the logs are the same + c.EnsureSame(t) + if len(c.fsms[0].logs) != 100 { + t.Fatalf("Bad log length") + } +} + +func TestRaft_VerifyLeader(t *testing.T) { + // Make the cluster + c := MakeCluster(3, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Verify we are leader + verify := leader.VerifyLeader() + + // Wait for the verify to apply + if err := verify.Error(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestRaft_VerifyLeader_Single(t *testing.T) { + // Make the cluster + c := MakeCluster(1, t, nil) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Verify we are leader + verify := leader.VerifyLeader() + + // Wait for the verify to apply + if err := verify.Error(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestRaft_VerifyLeader_Fail(t *testing.T) { + // Make a cluster + conf := inmemConfig() + c := MakeCluster(2, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have a followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 1 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 1 { + t.Fatalf("expected a followers: %v", followers) + } + + // Force follower to different term + follower := followers[0] + follower.setCurrentTerm(follower.getCurrentTerm() + 1) + + // Verify we are leader + verify := leader.VerifyLeader() + + // Wait for the leader to step down + if err 
:= verify.Error(); err != ErrNotLeader && err != ErrLeadershipLost { + t.Fatalf("err: %v", err) + } + + // Ensure the known leader is cleared + if l := leader.Leader(); l != "" { + t.Fatalf("bad: %v", l) + } +} + +func TestRaft_VerifyLeader_ParitalConnect(t *testing.T) { + // Make a cluster + conf := inmemConfig() + c := MakeCluster(3, t, conf) + defer c.Close() + + // Get the leader + leader := c.Leader() + + // Wait until we have a followers + limit := time.Now().Add(200 * time.Millisecond) + var followers []*Raft + for time.Now().Before(limit) && len(followers) != 2 { + time.Sleep(10 * time.Millisecond) + followers = c.GetInState(Follower) + } + if len(followers) != 2 { + t.Fatalf("expected a followers: %v", followers) + } + + // Force partial disconnect + follower := followers[0] + log.Printf("[INFO] Disconnecting %v", follower) + c.Disconnect(follower.localAddr) + + // Verify we are leader + verify := leader.VerifyLeader() + + // Wait for the leader to step down + if err := verify.Error(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestRaft_SettingPeers(t *testing.T) { + // Make the cluster + c := MakeClusterNoPeers(3, t, nil) + defer c.Close() + + peers := make([]string, 0) + for _, v := range c.rafts { + peers = append(peers, v.localAddr) + } + + for _, v := range c.rafts { + future := v.SetPeers(peers) + if err := future.Error(); err != nil { + t.Fatalf("error setting peers: %v", err) + } + } + + // Wait a while + time.Sleep(20 * time.Millisecond) + + // Should have a new leader + if leader := c.Leader(); leader == nil { + t.Fatalf("no leader?") + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go b/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go new file mode 100644 index 000000000..30541952d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go @@ -0,0 +1,513 @@ +package raft + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +const ( 
+ maxFailureScale = 12 + failureWait = 10 * time.Millisecond +) + +var ( + // ErrLogNotFound indicates a given log entry is not available. + ErrLogNotFound = errors.New("log not found") + + // ErrPipelineReplicationNotSupported can be returned by the transport to + // signal that pipeline replication is not supported in general, and that + // no error message should be produced. + ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") +) + +type followerReplication struct { + peer string + inflight *inflight + + stopCh chan uint64 + triggerCh chan struct{} + + currentTerm uint64 + matchIndex uint64 + nextIndex uint64 + + lastContact time.Time + lastContactLock sync.RWMutex + + failures uint64 + + notifyCh chan struct{} + notify []*verifyFuture + notifyLock sync.Mutex + + // stepDown is used to indicate to the leader that we + // should step down based on information from a follower. + stepDown chan struct{} + + // allowPipeline is used to control it seems like + // pipeline replication should be enabled. + allowPipeline bool +} + +// notifyAll is used to notify all the waiting verify futures +// if the follower believes we are still the leader. +func (s *followerReplication) notifyAll(leader bool) { + // Clear the waiting notifies minimizing lock time + s.notifyLock.Lock() + n := s.notify + s.notify = nil + s.notifyLock.Unlock() + + // Submit our votes + for _, v := range n { + v.vote(leader) + } +} + +// LastContact returns the time of last contact. +func (s *followerReplication) LastContact() time.Time { + s.lastContactLock.RLock() + last := s.lastContact + s.lastContactLock.RUnlock() + return last +} + +// setLastContact sets the last contact to the current time. +func (s *followerReplication) setLastContact() { + s.lastContactLock.Lock() + s.lastContact = time.Now() + s.lastContactLock.Unlock() +} + +// replicate is a long running routine that is used to manage +// the process of replicating logs to our followers. 
+func (r *Raft) replicate(s *followerReplication) { + // Start an async heartbeating routing + stopHeartbeat := make(chan struct{}) + defer close(stopHeartbeat) + r.goFunc(func() { r.heartbeat(s, stopHeartbeat) }) + +RPC: + shouldStop := false + for !shouldStop { + select { + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.replicateTo(s, maxIndex) + } + return + case <-s.triggerCh: + shouldStop = r.replicateTo(s, r.getLastLogIndex()) + case <-randomTimeout(r.conf.CommitTimeout): + shouldStop = r.replicateTo(s, r.getLastLogIndex()) + } + + // If things looks healthy, switch to pipeline mode + if !shouldStop && s.allowPipeline { + goto PIPELINE + } + } + return + +PIPELINE: + // Disable until re-enabled + s.allowPipeline = false + + // Replicates using a pipeline for high performance. This method + // is not able to gracefully recover from errors, and so we fall back + // to standard mode on failure. + if err := r.pipelineReplicate(s); err != nil { + if err != ErrPipelineReplicationNotSupported { + r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err) + } + } + goto RPC +} + +// replicateTo is used to replicate the logs up to a given last index. +// If the follower log is behind, we take care to bring them up to date. 
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { + // Create the base request + var req AppendEntriesRequest + var resp AppendEntriesResponse + var start time.Time +START: + // Prevent an excessive retry rate on errors + if s.failures > 0 { + select { + case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): + case <-r.shutdownCh: + } + } + + // Setup the request + if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { + goto SEND_SNAP + } else if err != nil { + return + } + + // Make the RPC call + start = time.Now() + if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) + s.failures++ + return + } + appendStats(s.peer, start, float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true + } + + // Update the last contact + s.setLastContact() + + // Update s based on success + if resp.Success { + // Update our replication state + updateLastAppended(s, &req) + + // Clear any failures, allow pipelining + s.failures = 0 + s.allowPipeline = true + } else { + s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) + s.matchIndex = s.nextIndex - 1 + s.failures++ + r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) + } + +CHECK_MORE: + // Check if there are more logs to replicate + if s.nextIndex <= lastIndex { + goto START + } + return + + // SEND_SNAP is used when we fail to get a log, usually because the follower + // is too far behind, and we must ship a snapshot down instead +SEND_SNAP: + if stop, err := r.sendLatestSnapshot(s); stop { + return true + } else if err != nil { + r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) + return + } + + // Check if there is more to replicate + goto CHECK_MORE +} + +// sendLatestSnapshot 
is used to send the latest snapshot we have +// down to our follower. +func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + } + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(s.peer, &req, &resp, snapshot); err != nil { + r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) + s.failures++ + return false, err + } + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", s.peer}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Mark any inflight logs as committed + s.inflight.CommitRange(s.matchIndex+1, meta.Index) + + // Update the indexes + s.matchIndex = meta.Index + s.nextIndex = s.matchIndex + 1 + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure 
they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. +func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + } + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.conf.HeartbeatTimeout / 10): + case <-stopCh: + return + } + + start := time.Now() + if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer, err) + failures++ + select { + case <-time.After(backoff(failureWait, failures, maxFailureScale)): + case <-stopCh: + } + } else { + s.setLastContact() + failures = 0 + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", s.peer}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(s.peer) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) + defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := s.nextIndex + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case <-s.triggerCh: + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) + case <-randomTimeout(r.conf.CommitTimeout): + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. 
+func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + *nextIdx = last.Index + 1 + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + req, resp := ready.Request(), ready.Response() + appendStats(s.peer, ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. 
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + req.Term = s.currentTerm + req.Leader = r.trans.EncodePeer(r.localAddr) + req.LeaderCommitIndex = r.getCommitIndex() + if err := r.setPreviousLog(req, nextIndex); err != nil { + return err + } + if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { + return err + } + return nil +} + +// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an +// AppendEntriesRequest given the next index to replicate. +func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { + // Guard for the first index, since there is no 0 log entry + // Guard against the previous index being a snapshot as well + if nextIndex == 1 { + req.PrevLogEntry = 0 + req.PrevLogTerm = 0 + + } else if (nextIndex - 1) == r.getLastSnapshotIndex() { + req.PrevLogEntry = r.getLastSnapshotIndex() + req.PrevLogTerm = r.getLastSnapshotTerm() + + } else { + var l Log + if err := r.logs.GetLog(nextIndex-1, &l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", + nextIndex-1, err) + return err + } + + // Set the previous index and term (0 if nextIndex is 1) + req.PrevLogEntry = l.Index + req.PrevLogTerm = l.Term + } + return nil +} + +// setNewLogs is used to setup the logs which should be appended for a request. 
+func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + // Append up to MaxAppendEntries or up to the lastIndex + req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) + maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) + for i := nextIndex; i <= maxIndex; i++ { + oldLog := new(Log) + if err := r.logs.GetLog(i, oldLog); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) + return err + } + req.Entries = append(req.Entries, oldLog) + } + return nil +} + +// appendStats is used to emit stats about an AppendEntries invocation. +func appendStats(peer string, start time.Time, logs float32) { + metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) + metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) +} + +// handleStaleTerm is used when a follower indicates that we have a stale term. +func (r *Raft) handleStaleTerm(s *followerReplication) { + r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) + s.notifyAll(false) // No longer leader + asyncNotifyCh(s.stepDown) +} + +// updateLastAppended is used to update follower replication state after a successful +// AppendEntries RPC. 
func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) {
	// Mark any inflight logs as committed
	if logs := req.Entries; len(logs) > 0 {
		first := logs[0]
		last := logs[len(logs)-1]
		s.inflight.CommitRange(first.Index, last.Index)

		// Update the indexes
		s.matchIndex = last.Index
		s.nextIndex = last.Index + 1
	}

	// Notify still leader
	s.notifyAll(true)
}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go
new file mode 100644
index 000000000..7151f43ce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go
@@ -0,0 +1,40 @@
package raft

import (
	"io"
)

// SnapshotMeta is for metadata of a snapshot.
type SnapshotMeta struct {
	ID    string // ID is opaque to the store, and is used for opening
	Index uint64 // Index the snapshot was taken at (see Create)
	Term  uint64 // Term the snapshot was taken at (see Create)
	Peers []byte // Peer set at snapshot time, already encoded (see Create)
	Size  int64  // Size of the snapshot in bytes
}

// SnapshotStore interface is used to allow for flexible implementations
// of snapshot storage and retrieval. For example, a client could implement
// a shared state store such as S3, allowing new nodes to restore snapshots
// without streaming from the leader.
type SnapshotStore interface {
	// Create is used to begin a snapshot at a given index and term,
	// with the current peer set already encoded.
	Create(index, term uint64, peers []byte) (SnapshotSink, error)

	// List is used to list the available snapshots in the store.
	// It should return them in descending order, with the highest index first.
	List() ([]*SnapshotMeta, error)

	// Open takes a snapshot ID and provides a ReadCloser. Once close is
	// called it is assumed the snapshot is no longer needed.
	Open(id string) (*SnapshotMeta, io.ReadCloser, error)
}

// SnapshotSink is returned by SnapshotStore.Create. The FSM will Write state
// to the sink and call Close on completion. On error, Cancel will be invoked.
+type SnapshotSink interface { + io.WriteCloser + ID() string + Cancel() error +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go b/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go new file mode 100644 index 000000000..4588ea8a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go @@ -0,0 +1,15 @@ +package raft + +// StableStore is used to provide stable storage +// of key configurations to ensure safety. +type StableStore interface { + Set(key []byte, val []byte) error + + // Get returns the value for key, or an empty byte slice if key was not found. + Get(key []byte) ([]byte, error) + + SetUint64(key []byte, val uint64) error + + // GetUint64 returns the uint64 value for key, or 0 if key was not found. + GetUint64(key []byte) (uint64, error) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/state.go b/Godeps/_workspace/src/github.com/hashicorp/raft/state.go new file mode 100644 index 000000000..41e80a1b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/state.go @@ -0,0 +1,169 @@ +package raft + +import ( + "sync/atomic" +) + +// RaftState captures the state of a Raft node: Follower, Candidate, Leader, +// or Shutdown. +type RaftState uint32 + +const ( + // Follower is the initial state of a Raft node. + Follower RaftState = iota + + // Candidate is one of the valid states of a Raft node. + Candidate + + // Leader is one of the valid states of a Raft node. + Leader + + // Shutdown is the terminal state of a Raft node. + Shutdown +) + +func (s RaftState) String() string { + switch s { + case Follower: + return "Follower" + case Candidate: + return "Candidate" + case Leader: + return "Leader" + case Shutdown: + return "Shutdown" + default: + return "Unknown" + } +} + +// raftState is used to maintain various state variables +// and provides an interface to set/get the variables in a +// thread safe manner. 
+type raftState struct { + // The current term, cache of StableStore + currentTerm uint64 + + // Cache the latest log from LogStore + LastLogIndex uint64 + LastLogTerm uint64 + + // Highest committed log entry + commitIndex uint64 + + // Last applied log to the FSM + lastApplied uint64 + + // Cache the latest snapshot index/term + lastSnapshotIndex uint64 + lastSnapshotTerm uint64 + + // Tracks the number of live routines + runningRoutines int32 + + // The current state + state RaftState +} + +func (r *raftState) getState() RaftState { + stateAddr := (*uint32)(&r.state) + return RaftState(atomic.LoadUint32(stateAddr)) +} + +func (r *raftState) setState(s RaftState) { + stateAddr := (*uint32)(&r.state) + atomic.StoreUint32(stateAddr, uint32(s)) +} + +func (r *raftState) getCurrentTerm() uint64 { + return atomic.LoadUint64(&r.currentTerm) +} + +func (r *raftState) setCurrentTerm(term uint64) { + atomic.StoreUint64(&r.currentTerm, term) +} + +func (r *raftState) getLastLogIndex() uint64 { + return atomic.LoadUint64(&r.LastLogIndex) +} + +func (r *raftState) setLastLogIndex(term uint64) { + atomic.StoreUint64(&r.LastLogIndex, term) +} + +func (r *raftState) getLastLogTerm() uint64 { + return atomic.LoadUint64(&r.LastLogTerm) +} + +func (r *raftState) setLastLogTerm(term uint64) { + atomic.StoreUint64(&r.LastLogTerm, term) +} + +func (r *raftState) getCommitIndex() uint64 { + return atomic.LoadUint64(&r.commitIndex) +} + +func (r *raftState) setCommitIndex(term uint64) { + atomic.StoreUint64(&r.commitIndex, term) +} + +func (r *raftState) getLastApplied() uint64 { + return atomic.LoadUint64(&r.lastApplied) +} + +func (r *raftState) setLastApplied(term uint64) { + atomic.StoreUint64(&r.lastApplied, term) +} + +func (r *raftState) getLastSnapshotIndex() uint64 { + return atomic.LoadUint64(&r.lastSnapshotIndex) +} + +func (r *raftState) setLastSnapshotIndex(term uint64) { + atomic.StoreUint64(&r.lastSnapshotIndex, term) +} + +func (r *raftState) getLastSnapshotTerm() 
uint64 { + return atomic.LoadUint64(&r.lastSnapshotTerm) +} + +func (r *raftState) setLastSnapshotTerm(term uint64) { + atomic.StoreUint64(&r.lastSnapshotTerm, term) +} + +func (r *raftState) incrRoutines() { + atomic.AddInt32(&r.runningRoutines, 1) +} + +func (r *raftState) decrRoutines() { + atomic.AddInt32(&r.runningRoutines, -1) +} + +func (r *raftState) getRoutines() int32 { + return atomic.LoadInt32(&r.runningRoutines) +} + +// Start a goroutine and properly handle the race between a routine +// starting and incrementing, and exiting and decrementing. +func (r *raftState) goFunc(f func()) { + r.incrRoutines() + go func() { + defer r.decrRoutines() + f() + }() +} + +// getLastIndex returns the last index in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastIndex() uint64 { + return max(r.getLastLogIndex(), r.getLastSnapshotIndex()) +} + +// getLastEntry returns the last index and term in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastEntry() (uint64, uint64) { + if r.getLastLogIndex() >= r.getLastSnapshotIndex() { + return r.getLastLogIndex(), r.getLastLogTerm() + } + return r.getLastSnapshotIndex(), r.getLastSnapshotTerm() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 000000000..1b1ea9c35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,80 @@ +package raft + +import ( + "errors" + "io" + "net" + "time" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. 
+type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := NewNetworkTransport(stream, maxPool, timeout, logOutput) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", address, timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go new file mode 100644 index 000000000..22d59da2a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go @@ -0,0 +1,24 @@ +package raft + +import ( + "net" + "testing" +) + +func TestTCPTransport_BadAddr(t *testing.T) { + _, err := NewTCPTransport("0.0.0.0:0", nil, 1, 0, nil) + if err != errNotAdvertisable { + t.Fatalf("err: %v", err) + } +} + +func TestTCPTransport_WithAdvertise(t *testing.T) { + addr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 12345} + trans, err := NewTCPTransport("0.0.0.0:0", addr, 1, 0, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if trans.LocalAddr() != "127.0.0.1:12345" { + t.Fatalf("bad: %v", trans.LocalAddr()) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go new file mode 100644 index 000000000..8928de0c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go @@ -0,0 +1,85 @@ +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. 
+type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() string + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(target string) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. + RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer name. + EncodePeer(string) []byte + + // DecodePeer is used to deserialize a peer name. + DecodePeer([]byte) string + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. 
	AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error)

	// Consumer returns a channel that can be used to consume
	// response futures when they are ready.
	Consumer() <-chan AppendFuture

	// Close closes the pipeline and cancels all inflight RPCs.
	Close() error
}

// AppendFuture is used to return information about a pipelined AppendEntries request.
type AppendFuture interface {
	Future
	// Start returns the time at which the request was dispatched.
	Start() time.Time
	// Request returns the request that was pipelined.
	Request() *AppendEntriesRequest
	// Response returns the response once the future is ready.
	Response() *AppendEntriesResponse
}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft/util.go
new file mode 100644
index 000000000..a6642c4c9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/util.go
@@ -0,0 +1,200 @@
package raft

import (
	"bytes"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"time"

	"github.com/hashicorp/go-msgpack/codec"
)

func init() {
	// Ensure we use a high-entropy seed for the pseudo-random generator
	rand.Seed(newSeed())
}

// newSeed returns an int64 from a crypto random source;
// can be used to seed a source for a math/rand.
func newSeed() int64 {
	r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}
	return r.Int64()
}

// randomTimeout returns a value that is between the minVal and 2x minVal.
// A zero minVal yields a nil channel, which blocks forever in a select.
func randomTimeout(minVal time.Duration) <-chan time.Time {
	if minVal == 0 {
		return nil
	}
	extra := (time.Duration(rand.Int63()) % minVal)
	return time.After(minVal + extra)
}

// min returns the minimum.
func min(a, b uint64) uint64 {
	if a <= b {
		return a
	}
	return b
}

// max returns the maximum.
func max(a, b uint64) uint64 {
	if a >= b {
		return a
	}
	return b
}

// generateUUID is used to generate a random UUID.
func generateUUID() string {
	var raw [16]byte
	if _, err := crand.Read(raw[:]); err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}

	// Canonical 8-4-4-4-12 hex layout.
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		raw[0:4], raw[4:6], raw[6:8], raw[8:10], raw[10:16])
}

// asyncNotify fans a non-blocking signal out to every channel in chans.
func asyncNotify(chans []chan struct{}) {
	for _, ch := range chans {
		asyncNotifyCh(ch)
	}
}

// asyncNotifyCh signals ch if it can accept immediately, and otherwise
// drops the signal rather than blocking.
func asyncNotifyCh(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

// asyncNotifyBool delivers v on ch without blocking; the value is
// discarded if no receiver is ready and the buffer is full.
func asyncNotifyBool(ch chan bool, v bool) {
	select {
	case ch <- v:
	default:
	}
}

// ExcludePeer returns a copy of peers with every occurrence of peer removed.
func ExcludePeer(peers []string, peer string) []string {
	remaining := make([]string, 0, len(peers))
	for _, candidate := range peers {
		if candidate == peer {
			continue
		}
		remaining = append(remaining, candidate)
	}
	return remaining
}

// PeerContained reports whether peer appears in the peers list.
func PeerContained(peers []string, peer string) bool {
	for _, candidate := range peers {
		if candidate == peer {
			return true
		}
	}
	return false
}

// AddUniquePeer appends peer to peers unless it is already present,
// in which case the list is returned unchanged.
func AddUniquePeer(peers []string, peer string) []string {
	if PeerContained(peers, peer) {
		return peers
	}
	return append(peers, peer)
}

// encodePeers is used to serialize a list of peers.
+func encodePeers(peers []string, trans Transport) []byte { + // Encode each peer + var encPeers [][]byte + for _, p := range peers { + encPeers = append(encPeers, trans.EncodePeer(p)) + } + + // Encode the entire array + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize a list of peers. +func decodePeers(buf []byte, trans Transport) []string { + // Decode the buffer first + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + panic(fmt.Errorf("failed to decode peers: %v", err)) + } + + // Deserialize each peer + var peers []string + for _, enc := range encPeers { + peers = append(peers, trans.DecodePeer(enc)) + } + + return peers +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer. +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Converts a uint64 to a byte slice. +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. 
+func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go new file mode 100644 index 000000000..191510972 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go @@ -0,0 +1,187 @@ +package raft + +import ( + "reflect" + "regexp" + "testing" + "time" +) + +func TestRandomTimeout(t *testing.T) { + start := time.Now() + timeout := randomTimeout(time.Millisecond) + + select { + case <-timeout: + diff := time.Now().Sub(start) + if diff < time.Millisecond { + t.Fatalf("fired early") + } + case <-time.After(3 * time.Millisecond): + t.Fatalf("timeout") + } +} + +func TestNewSeed(t *testing.T) { + vals := make(map[int64]bool) + for i := 0; i < 1000; i++ { + seed := newSeed() + if _, exists := vals[seed]; exists { + t.Fatal("newSeed() return a value it'd previously returned") + } + vals[seed] = true + } +} + +func TestRandomTimeout_NoTime(t *testing.T) { + timeout := randomTimeout(0) + if timeout != nil { + t.Fatalf("expected nil channel") + } +} + +func TestMin(t *testing.T) { + if min(1, 1) != 1 { + t.Fatalf("bad min") + } + if min(2, 1) != 1 { + t.Fatalf("bad min") + } + if min(1, 2) != 1 { + t.Fatalf("bad min") + } +} + +func TestMax(t *testing.T) { + if max(1, 1) != 1 { + t.Fatalf("bad max") + } + if max(2, 1) != 2 { + t.Fatalf("bad max") + } + if max(1, 2) != 2 { + t.Fatalf("bad max") + } +} + +func TestGenerateUUID(t *testing.T) { + prev := generateUUID() + for i := 0; i < 100; i++ { + id := generateUUID() + if prev == id { + t.Fatalf("Should get a new ID!") + } + + matched, err := regexp.MatchString( + `[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}`, id) + if !matched || err != nil { + t.Fatalf("expected match %s %v %s", id, matched, err) + } + } +} + +func TestAsyncNotify(t *testing.T) { + chs := 
[]chan struct{}{ + make(chan struct{}), + make(chan struct{}, 1), + make(chan struct{}, 2), + } + + // Should not block! + asyncNotify(chs) + asyncNotify(chs) + asyncNotify(chs) + + // Try to read + select { + case <-chs[0]: + t.Fatalf("should not have message!") + default: + } + select { + case <-chs[1]: + default: + t.Fatalf("should have message!") + } + select { + case <-chs[2]: + default: + t.Fatalf("should have message!") + } + select { + case <-chs[2]: + default: + t.Fatalf("should have message!") + } +} + +func TestExcludePeer(t *testing.T) { + peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} + peer := peers[2] + + after := ExcludePeer(peers, peer) + if len(after) != 2 { + t.Fatalf("Bad length") + } + if after[0] == peer || after[1] == peer { + t.Fatalf("should not contain peer") + } +} + +func TestPeerContained(t *testing.T) { + peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} + + if !PeerContained(peers, peers[2]) { + t.Fatalf("Expect contained") + } + if PeerContained(peers, NewInmemAddr()) { + t.Fatalf("unexpected contained") + } +} + +func TestAddUniquePeer(t *testing.T) { + peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} + after := AddUniquePeer(peers, peers[2]) + if !reflect.DeepEqual(after, peers) { + t.Fatalf("unexpected append") + } + after = AddUniquePeer(peers, NewInmemAddr()) + if len(after) != 4 { + t.Fatalf("expected append") + } +} + +func TestEncodeDecodePeers(t *testing.T) { + peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} + _, trans := NewInmemTransport() + + // Try to encode/decode + buf := encodePeers(peers, trans) + decoded := decodePeers(buf, trans) + + if !reflect.DeepEqual(peers, decoded) { + t.Fatalf("mismatch %v %v", peers, decoded) + } +} + +func TestBackoff(t *testing.T) { + b := backoff(10*time.Millisecond, 1, 8) + if b != 10*time.Millisecond { + t.Fatalf("bad: %v", b) + } + + b = backoff(20*time.Millisecond, 2, 8) + if b != 20*time.Millisecond { + 
t.Fatalf("bad: %v", b)
+	}
+
+	b = backoff(10*time.Millisecond, 8, 8)
+	if b != 640*time.Millisecond {
+		t.Fatalf("bad: %v", b)
+	}
+
+	b = backoff(10*time.Millisecond, 9, 8)
+	if b != 640*time.Millisecond {
+		t.Fatalf("bad: %v", b)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
new file mode 100644
index 000000000..883941e90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
@@ -0,0 +1,207 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client)
+
+## Description
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is 8086.
+You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now for good measure set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD + +Now with the administrivia out of the way, let's connect to our database. + +NOTE: If you've opted out of creating a user, you can omit Username and Password in +the configuration below. + +```go +package main + +import "github.com/influxdb/influxdb/client" +import "net/url" + +const ( + MyHost = "localhost" + MyPort = 8086 + MyDB = "square_holes" + MyMeasurement = "shapes" +) + +func main() { + u, err := url.Parse(fmt.Sprintf("http://%s:%d", MyHost, MyPort)) + if err != nil { + log.Fatal(err) + } + + conf := client.Config{ + URL: *u, + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + } + + con, err := client.NewClient(conf) + if err != nil { + log.Fatal(err) + } + + dur, ver, err := con.Ping() + if err != nil { + log.Fatal(err) + } + log.Printf("Happy as a Hippo! %v, %s", dur, ver) +} + +``` + +### Inserting Data + +Time series data aka *points* are written to the database using batch inserts. +The mechanism is to create one or more points and then create a batch aka *batch points* +and write these to a given database and series. A series is a combination of a +measurement (time/values) and a set of tags. + +In this sample we will create a batch of a 1,000 points. Each point has a time and +a single value as well as 2 tags indicating a shape and color. We write these points +to a database called _square_holes_ using a measurement named _shapes_. + +NOTE: You can specify a RetentionPolicy as part of the batch points. If not +provided InfluxDB will use the database _default_ retention policy. By default, the _default_ +retention policy never deletes any data it contains. 
+ +```go +func writePoints(con *client.Client) { + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: MyDB, + RetentionPolicy: "default", + } + _, err := con.Write(bps) + if err != nil { + log.Fatal(err) + } +} +``` + + +### Querying Data + +One nice advantage of using **InfluxDB** the ability to query your data using familiar +SQL constructs. In this example we can create a convenience function to query the database +as follows: + +```go +// queryDB convenience function to query the database +func queryDB(con *client.Client, cmd string) (res []client.Result, err error) { + q := client.Query{ + Command: cmd, + Database: MyDB, + } + if response, err := con.Query(q); err == nil { + if response.Error() != nil { + return res, response.Error() + } + res = response.Results + } + return +} +``` + +#### Creating a Database +```go +_, err := queryDB(con, fmt.Sprintf("create database %s", MyDB)) +if err != nil { + log.Fatal(err) +} +``` + +#### Count Records +```go +q := fmt.Sprintf("select count(%s) from %s", "value", MyMeasurement) +res, err := queryDB(con, q) +if err != nil { + log.Fatal(err) +} +count := res[0].Series[0].Values[0][1] +log.Printf("Found a total of `%v records", count) + +``` + +#### Find the last 10 _shapes_ records + +```go +q := fmt.Sprintf("select * from %s limit %d", MyMeasurement, 20) +res, err = queryDB(con, q) +if err != nil { + log.Fatal(err) +} + +for i, row := range res[0].Series[0].Values { + t, err := 
time.Parse(time.RFC3339, row[0].(string)) + if err != nil { + log.Fatal(err) + } + val, err := row[1].(json.Number).Int64() + log.Printf("[%2d] %s: %03d\n", i, t.Format(time.Stamp), val) +} +``` + +## Go Docs + +Please refer to +[http://godoc.org/github.com/influxdb/influxdb/client](http://godoc.org/github.com/influxdb/influxdb/client) +for documentation. + +## See Also + +You can also examine how the client library is used by the +[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go). diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go new file mode 100644 index 000000000..58805ceea --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go @@ -0,0 +1,113 @@ +package client_test + +import ( + "fmt" + "log" + "math/rand" + "net/url" + "os" + "strconv" + "time" + + "github.com/influxdb/influxdb/client" +) + +func ExampleNewClient() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just ommit Username/Password below. + conf := client.Config{ + URL: *host, + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + } + con, err := client.NewClient(conf) + if err != nil { + log.Fatal(err) + } + log.Println("Connection", con) +} + +func ExampleClient_Ping() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + dur, ver, err := con.Ping() + if err != nil { + log.Fatal(err) + } + log.Printf("Happy as a hippo! 
%v, %s", dur, ver) +} + +func ExampleClient_Query() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + q := client.Query{ + Command: "select count(value) from shapes", + Database: "square_holes", + } + if response, err := con.Query(q); err == nil && response.Error() == nil { + log.Println(response.Results) + } +} + +func ExampleClient_Write() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: "BumbeBeeTuna", + RetentionPolicy: "default", + } + _, err = con.Write(bps) + if err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go new file mode 100644 index 000000000..c4f34d84c --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go @@ -0,0 +1,656 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/tsdb" +) + +const ( + // 
DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + if strings.Contains(path, ":") { + h := strings.Split(path, ":") + i, e := strconv.Atoi(h[1]) + if e != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, e) + } + port = i + if h[0] == "" { + host = DefaultHost + } else { + host = h[0] + } + } else { + host = path + // If they didn't specify a port, always use the default port + port = DefaultPort + } + + u := url.URL{ + Scheme: "http", + } + if ssl { + u.Scheme = "https" + } + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + Username string + Password string + UserAgent string + Timeout time.Duration +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. 
+type Client struct { + url url.URL + username string + password string + httpClient *http.Client + userAgent string +} + +const ( + ConsistencyOne = "one" + ConsistencyAll = "all" + ConsistencyQuorum = "quorum" + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. +func NewClient(c Config) (*Client, error) { + client := Client{ + url: c.URL, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout}, + userAgent: c.UserAgent, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + u := c.url + + u.Path = "query" + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + u.RawQuery = values.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, decErr + } + // If we don't have an error in our json response, and didn't get statusOK, then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return 
&response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = "write" + + var b bytes.Buffer + for _, p := range bp.Points { + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", bp.Precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = "write" + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []influxql.Row + Err error +} + +// MarshalJSON encodes the result into JSON. 
+func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []influxql.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []influxql.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. 
+func (r Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +func (p *Point) MarshalString() string { + return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String() +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields 
map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. +// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). 
+// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of the server the client is connected to. 
+func (c *Client) Addr() string { + return c.url.String() +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go new file mode 100644 index 000000000..0a6df042e --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go @@ -0,0 +1,530 @@ +package client_test + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/influxdb/influxdb/client" +) + +func BenchmarkUnmarshalJSON2Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 
4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func BenchmarkUnmarshalJSON10Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1", + "tag1": "value1", + "tag2": "value2", + "tag2": "value3", + "tag4": "value4", + "tag5": "value5", + "tag6": "value6", + "tag7": "value7", + "tag8": "value8" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func TestNewClient(t *testing.T) { + config := client.Config{} + _, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + d, version, err := c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if d == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d) + } + if version != "x.x" { + t.Fatalf("unexpected version. 
expected %s, actual %v", "x.x", version) + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + w.WriteHeader(http.StatusNoContent) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + u.User = url.UserPassword("username", "password") + config := client.Config{URL: *u, Username: "username", Password: "password"} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{} + r, err := c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, UserAgent: test.userAgent} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + receivedUserAgent = "" + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp := client.BatchPoints{} + _, err = c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestPoint_UnmarshalEpoch(t *testing.T) { + now := time.Now() + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + { + name: "nanoseconds", + epoch: now.UnixNano(), + precision: "n", + expected: now, + }, + { + name: "microseconds", + epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), + precision: "u", + expected: now.Round(time.Microsecond), + }, + { + name: "milliseconds", + epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), + precision: "ms", + expected: now.Round(time.Millisecond), + }, + { + name: "seconds", + epoch: now.Round(time.Second).UnixNano() / int64(time.Second), + precision: "s", + expected: now.Round(time.Second), + }, + { + name: "minutes", + epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), + precision: "m", + expected: now.Round(time.Minute), + }, + { + name: "hours", + epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), + precision: "h", + expected: now.Round(time.Hour), + }, + { + name: "max int64", + epoch: 9223372036854775807, + precision: "n", + expected: time.Unix(0, 9223372036854775807), + }, + { + name: "100 years from now", + epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), + precision: "n", + expected: now.Add(time.Hour * 24 * 365 * 100), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_UnmarshalRFC(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + rfc string + now time.Time + expected time.Time + }{ + { + name: "RFC3339Nano", + rfc: time.RFC3339Nano, + now: now, + expected: now, + }, + { + name: "RFC3339", + rfc: time.RFC3339, + now: now.Round(time.Second), + expected: now.Round(time.Second), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + ts := test.now.Format(test.rfc) + data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_MarshalOmitempty(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + point client.Point + now time.Time + expected string + }{ + { + name: "all empty", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1}}`, + }, + { + name: "with time", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, + now: now, + expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), + }, + { + name: "with tags", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, + now: now, + expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, + }, + { + name: "with precision", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, + }, + } + + 
for _, test := range tests { + t.Logf("testing %q\n", test.name) + b, err := json.Marshal(&test.point) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if test.expected != string(b) { + t.Fatalf("Unexpected result. expected: %v, actual: %v", test.expected, string(b)) + } + } +} + +func TestEpochToTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, + {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, + {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, + {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, + {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, + {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + tm, e := client.EpochToTime(test.epoch, test.precision) + if e != nil { + t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) + } + if tm != test.expected { + t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) + } + } +} + +// helper functions + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Influxdb-Version", "x.x") + return + })) +} + +// Ensure that data with epoch times can be decoded. 
+func TestBatchPoints_Normal(t *testing.T) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069373, + "precision": "n", + "values": { + "value": 4541770385657154000 + } + }, + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069380, + "precision": "n", + "values": { + "value": 7199311900554737000 + } + } + ] +} +`) + + if err := json.Unmarshal(data, &bp); err != nil { + t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } +} + +func TestClient_Timeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err == nil { + t.Fatalf("unexpected success. expected timeout error") + } else if !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. expected 'use of closed network connection' error, got %v", err) + } + + confignotimeout := client.Config{URL: *u} + cnotimeout, err := client.NewClient(confignotimeout) + _, err = cnotimeout.Query(query) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md new file mode 100644 index 000000000..3ef272f41 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md @@ -0,0 +1,650 @@ +# The Influx Query Language Specification + +## Introduction + +This is a reference for the Influx Query Language ("InfluxQL"). + +InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data. + +## Notation + +The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go. + +``` +Production = production_name "=" [ Expression ] "." . +Expression = Alternative { "|" Alternative } . +Alternative = Term { Term } . +Term = production_name | token [ "…" token ] | Group | Option | Repetition . +Group = "(" Expression ")" . +Option = "[" Expression "]" . +Repetition = "{" Expression "}" . +``` + +Notation operators in order of increasing precedence: + +``` +| alternation +() grouping +[] option (0 or 1 times) +{} repetition (0 to n times) +``` + +## Query representation + +### Characters + +InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). + +``` +newline = /* the Unicode code point U+000A */ . +unicode_char = /* an arbitrary Unicode code point except newline */ . +``` + +## Letters and digits + +Letters are the set of ASCII characters plus the underscore character _ (U+005F) is considered a letter. + +Only decimal digits are supported. + +``` +letter = ascii_letter | "_" . 
+ascii_letter = "A" … "Z" | "a" … "z" . +digit = "0" … "9" . +``` + +## Identifiers + +Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field names. + +The rules: + +- double quoted identifiers can contain any unicode character other than a new line +- double quoted identifiers can contain escaped `"` characters (i.e., `\"`) +- unquoted identifiers must start with an upper or lowercase ASCII character or "_" +- unquoted identifiers may contain only ASCII letters, decimal digits, and "_" + +``` +identifier = unquoted_identifier | quoted_identifier . +unquoted_identifier = ( letter ) { letter | digit } . +quoted_identifier = `"` unicode_char { unicode_char } `"` . +``` + +#### Examples: + +``` +cpu +_cpu_stats +"1h" +"anything really" +"1_Crazy-1337.identifier>NAME👍" +``` + +## Keywords + +``` +ALL ALTER AS ASC BEGIN BY +CREATE CONTINUOUS DATABASE DATABASES DEFAULT DELETE +DESC DROP DURATION END EXISTS EXPLAIN +FIELD FROM GRANT GROUP IF IN +INNER INSERT INTO KEY KEYS LIMIT +SHOW MEASUREMENT MEASUREMENTS OFFSET ON ORDER +PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY +READ REPLICATION RETENTION REVOKE SELECT SERIES +SLIMIT SOFFSET TAG TO USER USERS +VALUES WHERE WITH WRITE +``` + +## Literals + +### Integers + +InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported. + +``` +int_lit = ( "1" … "9" ) { digit } . +``` + +### Floats + +InfluxQL supports floating-point literals. Exponents are not currently supported. + +``` +float_lit = int_lit "." int_lit . +``` + +### Strings + +String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`). + +``` +string_lit = `'` { unicode_char } `'` . +``` + +### Durations + +Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal. 
+ +``` +Duration unit definitions +------------------------- +| Units | Meaning | +|--------|-----------------------------------------| +| u or µ | microseconds (1 millionth of a second) | +| ms | milliseconds (1 thousandth of a second) | +| s | second | +| m | minute | +| h | hour | +| d | day | +| w | week | +``` + +``` +duration_lit = int_lit duration_unit . +duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" . +``` + +### Dates & Times + +The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: + +InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM + +``` +time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" +``` + +### Booleans + +``` +bool_lit = TRUE | FALSE . +``` + +### Regular Expressions + +``` +regex_lit = "/" { unicode_char } "/" . +``` + +## Queries + +A query is composed of one or more statements separated by a semicolon. + +``` +query = statement { ";" statement } . + +statement = alter_retention_policy_stmt | + create_continuous_query_stmt | + create_database_stmt | + create_retention_policy_stmt | + create_user_stmt | + delete_stmt | + drop_continuous_query_stmt | + drop_database_stmt | + drop_measurement_stmt | + drop_retention_policy_stmt | + drop_series_stmt | + drop_user_stmt | + grant_stmt | + show_continuous_queries_stmt | + show_databases_stmt | + show_field_keys_stmt | + show_measurements_stmt | + show_retention_policies | + show_series_stmt | + show_tag_keys_stmt | + show_tag_values_stmt | + show_users_stmt | + revoke_stmt | + select_stmt . +``` + +## Statements + +### ALTER RETENTION POLICY + +``` +alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name "ON" + db_name retention_policy_option + [ retention_policy_option ] + [ retention_policy_option ] . + +db_name = identifier . + +policy_name = identifier . 
+ +retention_policy_option = retention_policy_duration | + retention_policy_replication | + "DEFAULT" . + +retention_policy_duration = "DURATION" duration_lit . +retention_policy_replication = "REPLICATION" int_lit +``` + +#### Examples: + +```sql +-- Set default retention policy for mydb to 1h.cpu. +ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT; + +-- Change duration and replication factor. +ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4 +``` + +### CREATE CONTINUOUS QUERY + +``` +create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name "ON" db_name + "BEGIN" select_stmt "END" . + +query_name = identifier . +``` + +#### Examples: + +```sql +-- selects from default retention policy and writes into 6_months retention policy +CREATE CONTINUOUS QUERY "10m_event_count" +ON db_name +BEGIN + SELECT count(value) + INTO "6_months".events + FROM events + GROUP BY time(10m) +END; + +-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy +CREATE CONTINUOUS QUERY "1h_event_count" +ON db_name +BEGIN + SELECT sum(count) as count + INTO "2_years".events + FROM "6_months".events + GROUP BY time(1h) +END; +``` + +### CREATE DATABASE + +``` +create_database_stmt = "CREATE DATABASE" db_name +``` + +#### Example: + +```sql +CREATE DATABASE foo +``` + +### CREATE RETENTION POLICY + +``` +create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name "ON" + db_name retention_policy_duration + retention_policy_replication + [ "DEFAULT" ] . +``` + +#### Examples + +```sql +-- Create a retention policy. +CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2; + +-- Create a retention policy and set it as the default. +CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT; +``` + +### CREATE USER + +``` +create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password + [ "WITH ALL PRIVILEGES" ] . 
+``` + +#### Examples: + +```sql +-- Create a normal database user. +CREATE USER jdoe WITH PASSWORD '1337password'; + +-- Create a cluster admin. +-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. +CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES; +``` + +### DELETE + +``` +delete_stmt = "DELETE" from_clause where_clause . +``` + +#### Example: + +```sql +-- delete data points from the cpu measurement where the region tag +-- equals 'uswest' +DELETE FROM cpu WHERE region = 'uswest'; +``` + +### DROP CONTINUOUS QUERY + +drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name . + +#### Example: + +```sql +DROP CONTINUOUS QUERY myquery; +``` + +### DROP DATABASE + +drop_database_stmt = "DROP DATABASE" db_name . + +#### Example: + +```sql +DROP DATABASE mydb; +``` + +### DROP MEASUREMENT + +``` +drop_measurement_stmt = "DROP MEASUREMENT" measurement . +``` + +#### Examples: + +```sql +-- drop the cpu measurement +DROP MEASUREMENT cpu; +``` + +### DROP RETENTION POLICY + +``` +drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name "ON" db_name . +``` + +#### Example: + +```sql +-- drop the retention policy named 1h.cpu from mydb +DROP RETENTION POLICY "1h.cpu" ON mydb; +``` + +### DROP SERIES + +``` +drop_series_stmt = "DROP SERIES" [ from_clause ] [ where_clause ] +``` + +#### Example: + +```sql + +``` + +### DROP USER + +``` +drop_user_stmt = "DROP USER" user_name . +``` + +#### Example: + +```sql +DROP USER jdoe; + +``` + +### GRANT + +NOTE: Users can be granted privileges on databases that do not exist. 
+ +``` +grant_stmt = "GRANT" privilege [ on_clause ] to_clause +``` + +#### Examples: + +```sql +-- grant cluster admin privileges +GRANT ALL TO jdoe; + +-- grant read access to a database +GRANT READ ON mydb TO jdoe; +``` + +### SHOW CONTINUOUS QUERIES + +show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" + +#### Example: + +```sql +-- show all continuous queries +SHOW CONTINUOUS QUERIES; +``` + +### SHOW DATABASES + +``` +show_databases_stmt = "SHOW DATABASES" . +``` + +#### Example: + +```sql +-- show all databases +SHOW DATABASES; +``` + +### SHOW FIELD KEYS + +show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . + +#### Examples: + +```sql +-- show field keys from all measurements +SHOW FIELD KEYS; + +-- show field keys from specified measurement +SHOW FIELD KEYS FROM cpu; +``` + +### SHOW MEASUREMENTS + +show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ] [ limit_clause ] + [ offset_clause ] . + +```sql +-- show all measurements +SHOW MEASUREMENTS; + +-- show measurements where region tag = 'uswest' AND host tag = 'serverA' +SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA'; +``` + +### SHOW RETENTION POLICIES + +``` +show_retention_policies = "SHOW RETENTION POLICIES ON" db_name . +``` + +#### Example: + +```sql +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb; +``` + +### SHOW SERIES + +``` +show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_clause ] + [ limit_clause ] [ offset_clause ] . +``` + +#### Example: + +```sql + +``` + +### SHOW TAG KEYS + +``` +show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] + [ limit_clause ] [ offset_clause ] . 
+``` + +#### Examples: + +```sql +-- show all tag keys +SHOW TAG KEYS; + +-- show all tag keys from the cpu measurement +SHOW TAG KEYS FROM cpu; + +-- show all tag keys from the cpu measurement where the region key = 'uswest' +SHOW TAG KEYS FROM cpu WHERE region = 'uswest'; + +-- show all tag keys where the host key = 'serverA' +SHOW TAG KEYS WHERE host = 'serverA'; +``` + +### SHOW TAG VALUES + +``` +show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] + [ group_by_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag values across all measurements for the region tag +SHOW TAG VALUES WITH TAG = 'region'; + +-- show tag values from the cpu measurement for the region tag +SHOW TAG VALUES FROM cpu WITH TAG = 'region'; + +-- show tag values from the cpu measurement for region & host tag keys where service = 'redis' +SHOW TAG VALUES FROM cpu WITH TAG IN (region, host) WHERE service = 'redis'; +``` + +### SHOW USERS + +``` +show_users_stmt = "SHOW USERS" . +``` + +#### Example: + +```sql +-- show all users +SHOW USERS; +``` + +### REVOKE + +``` +revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name +``` + +#### Examples: + +```sql +-- revoke cluster admin from jdoe +REVOKE ALL PRIVILEGES FROM jdoe; + +-- revoke read privileges from jdoe on mydb +REVOKE READ ON mydb FROM jdoe; +``` + +### SELECT + +``` +select_stmt = fields from_clause [ into_clause ] [ where_clause ] + [ group_by_clause ] [ order_by_clause ] [ limit_clause ] + [ offset_clause ] [ slimit_clause ] [ soffset_clause ]. +``` + +#### Examples: + +```sql +-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals +SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0); +``` + +## Clauses + +``` +from_clause = "FROM" measurements . + +group_by_clause = "GROUP BY" dimensions fill(